/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>

DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node);

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long end = addr + size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);
}

static void unmap_vm_area(struct vm_struct *area)
{
	unmap_kernel_range((unsigned long)area->addr, area->size);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*
 * Map a vmalloc()-space virtual address to the physical page.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

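/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * already holds a vmalloc()'ed buffer can walk it page by page and recover
 * the backing struct page / PFN for each PAGE_SIZE chunk.  The helper name
 * below is hypothetical.
 *
 *	static void example_walk_pages(void *vbuf, unsigned long nr_pages)
 *	{
 *		unsigned long i;
 *
 *		for (i = 0; i < nr_pages; i++) {
 *			void *va = vbuf + i * PAGE_SIZE;
 *			struct page *page = vmalloc_to_page(va);
 *			unsigned long pfn = vmalloc_to_pfn(va);
 *
 *			printk(KERN_DEBUG "va %p -> page %p pfn %lu\n",
 *			       va, page, pfn);
 *		}
 *	}
 */
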
static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
					    unsigned long start, unsigned long end,
					    int node, gfp_t gfp_mask)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}
	if ((size + addr) < addr)
		goto out;
	if (addr > end - size)
		goto out;

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask);
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(const void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)
			break;
	}

	return tmp;
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(const void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vm_struct *v;

	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);

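/*
 * Usage sketch (illustrative only, not part of this file): allocate a few
 * individual pages, stitch them into one contiguous kernel mapping with
 * vmap(), and tear the mapping down again with vunmap().  Function and
 * variable names here are hypothetical.
 *
 *	static void *example_vmap_two_pages(struct page **pages)
 *	{
 *		pages[0] = alloc_page(GFP_KERNEL);
 *		pages[1] = alloc_page(GFP_KERNEL);
 *		if (!pages[0] || !pages[1])
 *			return NULL;	(caller frees whatever was allocated)
 *
 *		return vmap(pages, 2, VM_MAP, PAGE_KERNEL);
 *	}
 *
 * The mapping is released with vunmap(addr); the pages themselves must still
 * be freed with __free_page() by the caller.
 */
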
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
					PAGE_KERNEL, node);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
						int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

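/*
 * Usage sketch (illustrative only, not part of this file): the common
 * vmalloc()/vfree() pairing for a large, virtually contiguous buffer that
 * does not need to be physically contiguous.  Names are hypothetical.
 *
 *	static int example_big_table(void)
 *	{
 *		char *table = vmalloc(1024 * 1024);
 *
 *		if (!table)
 *			return -ENOMEM;
 *		memset(table, 0, 1024 * 1024);
 *		... use the table ...
 *		vfree(table);
 *		return 0;
 *	}
 *
 * vfree() must not be called from interrupt context (see vfree() above).
 */
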
/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);

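/*
 * Usage sketch (illustrative only, not part of this file): on NUMA systems a
 * per-node buffer can be placed on the node that will touch it most, falling
 * back to the default policy by passing -1 style callers to vmalloc().  The
 * helper name is hypothetical.
 *
 *	static void *example_pernode_buf(int nid, unsigned long size)
 *	{
 *		return (nid >= 0) ? vmalloc_node(size, nid) : vmalloc(size);
 *	}
 */
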
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

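/*
 * Usage sketch (illustrative only, not part of this file): vread()/vwrite()
 * copy through the vmalloc area while taking vmlist_lock internally, padding
 * with zeroes for gaps between areas, which is how /proc/kcore style readers
 * consume them.  The helper name is hypothetical.
 *
 *	static long example_peek(char *kbuf, void *vmalloc_addr,
 *				 unsigned long len)
 *	{
 *		return vread(kbuf, (char *)vmalloc_addr, len);
 *	}
 */
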
/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * those criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;

out_einval_locked:
	read_unlock(&vmlist_lock);
	return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);

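/*
 * Usage sketch (illustrative only, not part of this file): a character
 * device that exposes a vmalloc_user() buffer to userspace typically calls
 * remap_vmalloc_range() from its ->mmap handler.  The buffer must have been
 * allocated with vmalloc_user()/vmalloc_32_user() so that VM_USERMAP is set.
 * Names other than the exported functions are hypothetical.
 *
 *	static void *example_shared_buf;	(set up with vmalloc_user())
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, example_shared_buf,
 *					   vma->vm_pgoff);
 *	}
 */
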
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range.  No actual mappings
 * are created.  If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

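/*
 * Usage sketch (illustrative only, not part of this file): alloc_vm_area()
 * suits callers, such as the Xen grant table code, that want a range of
 * kernel virtual addresses with page tables already populated so they can
 * install their own mappings later.  The helper name is hypothetical.
 *
 *	static struct vm_struct *example_reserve(size_t size)
 *	{
 *		struct vm_struct *vm = alloc_vm_area(size);
 *
 *		if (!vm)
 *			return NULL;
 *		... point the PTEs of vm->addr at the desired pages here ...
 *		return vm;
 *	}
 *
 * The range is returned with free_vm_area(vm) once the mappings are gone.
 */
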
void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;

	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);