/*
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range(unsigned long start, unsigned long end,
				pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap(start, end);

	if (unlikely(err))
		return err;
	return nr;
}

static inline int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
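
/*
 * Illustrative sketch (not part of the original file): a hypothetical helper
 * showing how a caller might walk a vmalloc()'ed buffer page by page and
 * record the backing page frame numbers, e.g. to hand them to hardware.
 * Name and signature are made up for illustration only.
 */
static inline void example_collect_pfns(void *buf, unsigned int npages,
					unsigned long *pfns)
{
	unsigned int i;

	for (i = 0; i < npages; i++)
		pfns[i] = vmalloc_to_pfn((char *)buf + i * PAGE_SIZE);
}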

/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;

	BUG_ON(size & ~PAGE_MASK);

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	addr = ALIGN(vstart, align);

	spin_lock(&vmap_area_lock);
	/* XXX: could have a last_hole cache */
	n = vmap_area_root.rb_node;
	if (n) {
		struct vmap_area *first = NULL;

		do {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				if (!first && tmp->va_start < addr + size)
					first = tmp;
				n = n->rb_left;
			} else {
				first = tmp;
				n = n->rb_right;
			}
		} while (n);

		if (!first)
			goto found;

		if (first->va_end < addr) {
			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}

		while (addr + size > first->va_start && addr + size <= vend) {
			addr = ALIGN(first->va_end + PAGE_SIZE, align);

			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}
	}
found:
	if (addr + size > vend) {
		spin_unlock(&vmap_area_lock);
		if (!purged) {
			purge_vmap_area_lazy();
			purged = 1;
			goto retry;
		}
		if (printk_ratelimit())
			printk(KERN_WARNING
				"vmap allocation for size %lu failed: "
				"use vmalloc=<size> to increase size.\n", size);
		return ERR_PTR(-EBUSY);
	}

	BUG_ON(addr & (align-1));

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void rcu_free_va(struct rcu_head *head)
{
	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);

	kfree(va);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	call_rcu(&va->rcu_head, rcu_free_va);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
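
/*
 * Worked example (illustrative, not part of the original file): with 4KB
 * pages each log step adds 32MB/4KB = 8192 lazily-freed pages. On an 8-CPU
 * machine fls(8) = 4, so up to 4 * 8192 = 32768 pages (128MB) of stale vmap
 * space may accumulate before a global TLB flush is triggered.
 */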

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			unmap_vmap_area(va);
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr) {
		BUG_ON(nr > atomic_read(&vmap_lazy_nr));
		atomic_sub(nr, &vmap_lazy_nr);
	}

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry(va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
						VMALLOC_PAGES / NR_CPUS / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
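
/*
 * Worked example (illustrative, not part of the original file): on 32-bit
 * with 4KB pages and NR_CPUS = 4, VMALLOC_PAGES is 128MB/4KB = 32768 and
 * 32768 / 4 / 16 = 512, which falls between VMAP_BBMAP_BITS_MIN (64) and
 * VMAP_BBMAP_BITS_MAX (1024), so VMAP_BBMAP_BITS = 512 and
 * VMAP_BLOCK_SIZE = 512 * 4KB = 2MB per vmap block.
 */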

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
	struct list_head dirty;
	unsigned int nr_dirty;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	union {
		struct {
			struct list_head free_list;
			struct list_head dirty_list;
		};
		struct rcu_head rcu_head;
	};
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (unlikely(IS_ERR(va))) {
		kfree(vb);
		return ERR_PTR(PTR_ERR(va));
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);
	INIT_LIST_HEAD(&vb->dirty_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void rcu_free_vb(struct rcu_head *head)
{
	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);

	kfree(vb);
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	spin_lock(&vb->vbq->lock);
	if (!list_empty(&vb->free_list))
		list_del(&vb->free_list);
	if (!list_empty(&vb->dirty_list))
		list_del(&vb->dirty_list);
	spin_unlock(&vb->vbq->lock);

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_unmap_vmap_area_noflush(vb->va);
	call_rcu(&vb->rcu_head, rcu_free_vb);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i >= 0) {
			addr = vb->va->va_start + (i << PAGE_SHIFT);
			BUG_ON(addr_to_vb_idx(addr) !=
					addr_to_vb_idx(vb->va->va_start));
			vb->free -= 1UL << order;
			if (vb->free == 0) {
				spin_lock(&vbq->lock);
				list_del_init(&vb->free_list);
				spin_unlock(&vbq->lock);
			}
			spin_unlock(&vb->lock);
			break;
		}
		spin_unlock(&vb->lock);
	}
	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);

		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	spin_lock(&vb->lock);
	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
	if (!vb->dirty) {
		spin_lock(&vb->vbq->lock);
		list_add(&vb->dirty_list, &vb->vbq->dirty);
		spin_unlock(&vb->vbq->lock);
	}
	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free || !list_empty(&vb->free_list));
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				vunmap_page_range(s, e);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
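
/*
 * Illustrative sketch (not part of the original file): a caller that is about
 * to change the attributes of pages it owns (for example before handing them
 * to a device with different caching requirements) can flush any stale lazy
 * vmap aliases first. The helper name is hypothetical.
 */
static inline void example_prepare_pages_for_device(void)
{
	vm_unmap_aliases();	/* no lazily-unmapped aliases remain mapped */
}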

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
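
/*
 * Illustrative usage sketch (not part of the original file): map a small
 * array of pages transiently, access them through the linear mapping, then
 * tear the mapping down. Function and variable names are hypothetical.
 */
static int example_zero_two_pages(struct page *pg[2])
{
	void *va;

	va = vm_map_ram(pg, 2, -1, PAGE_KERNEL);	/* -1: no node preference */
	if (!va)
		return -ENOMEM;
	memset(va, 0, 2 * PAGE_SIZE);	/* touch both pages via the mapping */
	vm_unmap_ram(va, 2);
	return 0;
}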

void __init vmalloc_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		INIT_LIST_HEAD(&vbq->dirty);
		vbq->nr_dirty = 0;
	}

	vmap_initialized = true;
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*** Old vmalloc interfaces ***/
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long flags, unsigned long start, unsigned long end,
		int node, gfp_t gfp_mask, void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	struct vm_struct *tmp, **p;
	unsigned long align = 1;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	area->flags = flags;
	area->addr = (void *)va->va_start;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	area->caller = caller;
	va->private = area;
	va->flags |= VM_VM_AREA;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= area->addr)
			break;
	}
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/**
 * get_vm_area  -  reserve a contiguous kernel virtual area
 * @size:	size of the area
 * @flags:	%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask, __builtin_return_address(0));
}

static struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->private;

	return NULL;
}

/**
 * remove_vm_area  -  find and remove a contiguous kernel virtual area
 * @addr:	base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->private;
		struct vm_struct *tmp, **p;
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		write_lock(&vmlist_lock);
		for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
			;
		*p = tmp->next;
		write_unlock(&vmlist_lock);

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree  -  release memory allocated by vmalloc()
 * @addr:	memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc().  If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap  -  release virtual mapping obtained by vmap()
 * @addr:	memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap  -  map an array of pages into virtually contiguous space
 * @pages:	array of page pointers
 * @count:	number of pages to map
 * @flags:	vm_area->flags
 * @prot:	page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
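
/*
 * Illustrative usage sketch (not part of the original file): stitch two
 * separately allocated pages into one virtually contiguous region. Names
 * are hypothetical; undo the mapping with vunmap().
 */
static void *example_vmap_two_pages(struct page *a, struct page *b)
{
	struct page *pages[2] = { a, b };

	/* VM_MAP marks the area as a vmap()'ed region in /proc/vmallocinfo */
	return vmap(pages, 2, VM_MAP, PAGE_KERNEL);
}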

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1,
					__builtin_return_address(0));
}

/**
 * __vmalloc_node  -  allocate virtually contiguous memory
 * @size:	allocation size
 * @gfp_mask:	flags for the page level allocator
 * @prot:	protection mask for the allocated pages
 * @node:	node to use for allocation or -1
 * @caller:	caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
						int node, void *caller)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
						node, gfp_mask, caller);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc  -  allocate virtually contiguous memory
 * @size:	allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
				-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);

/**
 * vmalloc_user  -  allocate zeroed virtually contiguous memory for userspace
 * @size:	allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node  -  allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
				node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec  -  allocate virtually contiguous, executable memory
 * @size:	allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
				-1, __builtin_return_address(0));
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 * @size:	allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
				-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user  -  allocate zeroed virtually contiguous 32bit memory
 * @size:	allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

/**
 * remap_vmalloc_range  -  map vmalloc pages to userspace
 * @vma:	vma to cover (map full range of vma)
 * @addr:	vmalloc memory
 * @pgoff:	number of pages into addr before first page to map
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criteria isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	area = find_vm_area(addr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		return -EINVAL;

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;
}
EXPORT_SYMBOL(remap_vmalloc_range);
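
/*
 * Illustrative sketch (not part of the original file): the typical pairing of
 * vmalloc_user() with remap_vmalloc_range() from a driver's ->mmap() handler.
 * "example_mmap" and the private_data convention are hypothetical.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	void *buf = file->private_data;	/* assumed: allocated by vmalloc_user() */

	return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
}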

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size:	size of the area
 *
 * Returns:	NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range.  No actual mappings
 * are created.  If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
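
/*
 * Illustrative usage sketch (not part of the original file): reserve a chunk
 * of kernel address space with populated page tables but no mappings, as a
 * hypervisor backend might, then release it again. Names are hypothetical.
 */
static int example_reserve_window(void)
{
	struct vm_struct *area;

	area = alloc_vm_area(4 * PAGE_SIZE);
	if (!area)
		return -ENOMEM;
	/* area->addr now points at 4 pages of reserved, unmapped KVA */
	free_vm_area(area);
	return 0;
}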

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;

}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
	read_unlock(&vmlist_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (NUMA_BUILD) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%p-0x%p %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller) {
		char buff[KSYM_SYMBOL_LEN];

		seq_putc(m, ' ');
		sprint_symbol(buff, (unsigned long)v->caller);
		seq_puts(m, buff);
	}

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%lx", v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int vmalloc_open(struct inode *inode, struct file *file)
{
	unsigned int *ptr = NULL;
	int ret;

	if (NUMA_BUILD)
		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
	ret = seq_open(file, &vmalloc_op);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = ptr;
	} else
		kfree(ptr);
	return ret;
}

static const struct file_operations proc_vmalloc_operations = {
	.open		= vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);

#endif