/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/bootmem.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range(unsigned long start, unsigned long end,
				pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap(start, end);

	if (unlikely(err))
		return err;
	return nr;
}

static inline int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
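
/*
 * Editor's illustrative sketch (not part of the original file): looking up
 * the page backing a vmalloc()ed buffer. The buffer and its size are made
 * up for the example; error handling is kept minimal.
 *
 *	void *buf = vmalloc(4 * PAGE_SIZE);
 *
 *	if (buf) {
 *		struct page *page = vmalloc_to_page(buf + 2 * PAGE_SIZE);
 *		unsigned long pfn = vmalloc_to_pfn(buf + 2 * PAGE_SIZE);
 *
 *		printk(KERN_DEBUG "third page at %p, pfn %lu\n", page, pfn);
 *		vfree(buf);
 *	}
 */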

/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	addr = ALIGN(vstart, align);

	spin_lock(&vmap_area_lock);
	if (addr + size - 1 < addr)
		goto overflow;

	/* XXX: could have a last_hole cache */
	n = vmap_area_root.rb_node;
	if (n) {
		struct vmap_area *first = NULL;

		do {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				if (!first && tmp->va_start < addr + size)
					first = tmp;
				n = n->rb_left;
			} else {
				first = tmp;
				n = n->rb_right;
			}
		} while (n);

		if (!first)
			goto found;

		if (first->va_end < addr) {
			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}

		while (addr + size > first->va_start && addr + size <= vend) {
			addr = ALIGN(first->va_end + PAGE_SIZE, align);
			if (addr + size - 1 < addr)
				goto overflow;

			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}
	}
found:
	if (addr + size > vend) {
overflow:
		spin_unlock(&vmap_area_lock);
		if (!purged) {
			purge_vmap_area_lazy();
			purged = 1;
			goto retry;
		}
		if (printk_ratelimit())
			printk(KERN_WARNING
				"vmap allocation for size %lu failed: "
				"use vmalloc=<size> to increase size.\n", size);
		return ERR_PTR(-EBUSY);
	}

	BUG_ON(addr & (align-1));

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void rcu_free_va(struct rcu_head *head)
{
	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);

	kfree(va);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	call_rcu(&va->rcu_head, rcu_free_va);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
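
/*
 * Editor's worked example (illustrative numbers, not from the original
 * source): with 4 online CPUs, fls(4) == 3, so up to 3 * 32MB = 96MB of
 * lazily freed vmap space (24576 pages with 4K pages) may accumulate before
 * a purge is attempted; with 64 CPUs, fls(64) == 7 gives 224MB.
 */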

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			unmap_vmap_area(va);
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr) {
		BUG_ON(nr > atomic_read(&vmap_lazy_nr));
		atomic_sub(nr, &vmap_lazy_nr);
	}

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,		\
					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
						VMALLOC_PAGES / NR_CPUS / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
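
/*
 * Editor's worked example (illustrative, not from the original source): on a
 * 64-bit build with 4K pages and NR_CPUS == 64, VMALLOC_PAGES is 128G / 4K =
 * 32M pages, so VMALLOC_PAGES / NR_CPUS / 16 == 32768; this is clamped to
 * VMAP_BBMAP_BITS_MAX == 1024 bits, making VMAP_BLOCK_SIZE 1024 * 4K = 4MB
 * per vmap block. A 32-bit build with NR_CPUS == 4 gets 32768 / 4 / 16 = 512
 * bits, i.e. 2MB blocks.
 */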

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
	struct list_head dirty;
	unsigned int nr_dirty;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	union {
		struct {
			struct list_head free_list;
			struct list_head dirty_list;
		};
		struct rcu_head rcu_head;
	};
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}
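
/*
 * Editor's worked example (illustrative, the base address is hypothetical):
 * with 4MB blocks and a block-aligned VMALLOC_START of 0xffffc20000000000,
 * an address of 0xffffc20000500000 yields (0x500000 / 0x400000) == 1, and
 * every address inside the same block yields the same index, which is what
 * the radix tree lookup in vb_free() relies on.
 */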

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (unlikely(IS_ERR(va))) {
		kfree(vb);
		return ERR_PTR(PTR_ERR(va));
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);
	INIT_LIST_HEAD(&vb->dirty_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void rcu_free_vb(struct rcu_head *head)
{
	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);

	kfree(vb);
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	spin_lock(&vb->vbq->lock);
	if (!list_empty(&vb->free_list))
		list_del(&vb->free_list);
	if (!list_empty(&vb->dirty_list))
		list_del(&vb->dirty_list);
	spin_unlock(&vb->vbq->lock);

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_unmap_vmap_area_noflush(vb->va);
	call_rcu(&vb->rcu_head, rcu_free_vb);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i >= 0) {
			addr = vb->va->va_start + (i << PAGE_SHIFT);
			BUG_ON(addr_to_vb_idx(addr) !=
					addr_to_vb_idx(vb->va->va_start));
			vb->free -= 1UL << order;
			if (vb->free == 0) {
				spin_lock(&vbq->lock);
				list_del_init(&vb->free_list);
				spin_unlock(&vbq->lock);
			}
			spin_unlock(&vb->lock);
			break;
		}
		spin_unlock(&vb->lock);
	}
	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	spin_lock(&vb->lock);
	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
	if (!vb->dirty) {
		spin_lock(&vb->vbq->lock);
		list_add(&vb->dirty_list, &vb->vbq->dirty);
		spin_unlock(&vb->vbq->lock);
	}
	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free || !list_empty(&vb->free_list));
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				vunmap_page_range(s, e);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
							VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
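
/*
 * Editor's illustrative sketch (not part of the original file): mapping a
 * small array of freshly allocated pages with vm_map_ram() and tearing the
 * mapping down again. The page count is made up and error handling of the
 * page allocations is omitted for brevity.
 *
 *	struct page *pages[4];
 *	void *vaddr;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *
 *	vaddr = vm_map_ram(pages, 4, -1, PAGE_KERNEL);
 *	if (vaddr) {
 *		memset(vaddr, 0, 4 * PAGE_SIZE);
 *		vm_unmap_ram(vaddr, 4);
 *	}
 *	for (i = 0; i < 4; i++)
 *		__free_page(pages[i]);
 */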

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		INIT_LIST_HEAD(&vbq->dirty);
		vbq->nr_dirty = 0;
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = alloc_bootmem(sizeof(struct vmap_area));
		va->flags = tmp->flags | VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		__insert_vmap_area(va);
	}
	vmap_initialized = true;
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);
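
/*
 * Editor's illustrative sketch (not part of the original file): the low level
 * pairing of get_vm_area() with map_vm_area(). map_vm_area() advances the
 * caller's page pointer on success, so a local copy is passed. "my_pages"
 * (an array holding at least two struct page pointers) is an assumption of
 * the example; the mapping would later be undone with vunmap(area->addr).
 *
 *	struct vm_struct *area = get_vm_area(2 * PAGE_SIZE, VM_MAP);
 *	struct page **tmp = my_pages;
 *
 *	if (area && map_vm_area(area, PAGE_KERNEL, &tmp) == 0)
 *		return area->addr;
 */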

/*** Old vmalloc interfaces ***/
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long flags, unsigned long start, unsigned long end,
		int node, gfp_t gfp_mask, void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	struct vm_struct *tmp, **p;
	unsigned long align = 1;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	area->flags = flags;
	area->addr = (void *)va->va_start;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	area->caller = caller;
	va->private = area;
	va->flags |= VM_VM_AREA;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= area->addr)
			break;
	}
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
						__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end,
				void *caller)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
						caller);
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask, __builtin_return_address(0));
}

static struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->private;

	return NULL;
}

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->private;
		struct vm_struct *tmp, **p;

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		write_lock(&vmlist_lock);
		for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
			;
		*p = tmp->next;
		write_unlock(&vmlist_lock);

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
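
/*
 * Editor's illustrative sketch (not part of the original file): using vmap()
 * to get a contiguous kernel view of scattered pages and releasing it with
 * vunmap(). vunmap() releases the virtual mapping but not the pages, so the
 * pages are freed separately. Error handling of the allocations is omitted.
 *
 *	struct page *pages[8];
 *	void *vaddr;
 *	int i;
 *
 *	for (i = 0; i < 8; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *
 *	vaddr = vmap(pages, 8, VM_MAP, PAGE_KERNEL);
 *	if (vaddr) {
 *		memset(vaddr, 0xff, 8 * PAGE_SIZE);
 *		vunmap(vaddr);
 *	}
 *	for (i = 0; i < 8; i++)
 *		__free_page(pages[i]);
 */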

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1,
					__builtin_return_address(0));
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or -1
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
						int node, void *caller)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
						node, gfp_mask, caller);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
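
/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * vmalloc()/vfree() pair for a buffer too large for kmalloc(). The 1MB size
 * is made up for the example.
 *
 *	char *buf = vmalloc(1024 * 1024);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	memset(buf, 0, 1024 * 1024);
 *	vfree(buf);
 */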

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      -1, __builtin_return_address(0));
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
			      -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	that criteria isn't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	area = find_vm_area(addr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		return -EINVAL;

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;
}
EXPORT_SYMBOL(remap_vmalloc_range);
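
/*
 * Editor's illustrative sketch (not part of the original file): a driver
 * mmap() handler exposing a buffer to userspace. "my_buf" is an assumption
 * of the example and must have been allocated with vmalloc_user() (or
 * vmalloc_32_user()), which sets VM_USERMAP as remap_vmalloc_range()
 * requires.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
 *	}
 */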

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
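
/*
 * Editor's illustrative sketch (not part of the original file): reserving a
 * page-table-backed hole in vmalloc space and releasing it again. The size
 * and the install_my_ptes() helper, which would install the caller's own
 * ptes at area->addr, are hypothetical.
 *
 *	struct vm_struct *area = alloc_vm_area(16 * PAGE_SIZE);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	install_my_ptes(area->addr);
 *	free_vm_area(area);
 */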

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
	read_unlock(&vmlist_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (NUMA_BUILD) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%p-0x%p %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller) {
		char buff[KSYM_SYMBOL_LEN];

		seq_putc(m, ' ');
		sprint_symbol(buff, (unsigned long)v->caller);
		seq_puts(m, buff);
	}

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%lx", v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int vmalloc_open(struct inode *inode, struct file *file)
{
	unsigned int *ptr = NULL;
	int ret;

	if (NUMA_BUILD)
		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
	ret = seq_open(file, &vmalloc_op);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = ptr;
	} else
		kfree(ptr);
	return ret;
}

static const struct file_operations proc_vmalloc_operations = {
	.open		= vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);

#endif