 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/vmalloc.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
                            int node, void *caller);
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
        pte_t *pte;

        pte = pte_offset_kernel(pmd, addr);
        do {
                pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
                WARN_ON(!pte_none(ptent) && !pte_present(ptent));
        } while (pte++, addr += PAGE_SIZE, addr != end);
}
static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
                                    unsigned long end)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                vunmap_pte_range(pmd, addr, next);
        } while (pmd++, addr = next, addr != end);
}
static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
                                    unsigned long end)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                vunmap_pmd_range(pud, addr, next);
        } while (pud++, addr = next, addr != end);
}
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;
        unsigned long end = addr + size;

        BUG_ON(addr >= end);
        pgd = pgd_offset_k(addr);
        flush_cache_vunmap(addr, end);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                vunmap_pud_range(pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
        flush_tlb_kernel_range(start, end);
}
static void unmap_vm_area(struct vm_struct *area)
{
        unmap_kernel_range((unsigned long)area->addr, area->size);
}
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
                          unsigned long end, pgprot_t prot, struct page ***pages)
{
        pte_t *pte;

        pte = pte_alloc_kernel(pmd, addr);
        if (!pte)
                return -ENOMEM;
        do {
                struct page *page = **pages;

                WARN_ON(!pte_none(*pte));
                if (!page)
                        return -ENOMEM;
                set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
                (*pages)++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        return 0;
}
static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
                                 unsigned long end, pgprot_t prot, struct page ***pages)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_alloc(&init_mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
        do {
                next = pmd_addr_end(addr, end);
                if (vmap_pte_range(pmd, addr, next, prot, pages))
                        return -ENOMEM;
        } while (pmd++, addr = next, addr != end);
        return 0;
}
static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
                                 unsigned long end, pgprot_t prot, struct page ***pages)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_alloc(&init_mm, pgd, addr);
        if (!pud)
                return -ENOMEM;
        do {
                next = pud_addr_end(addr, end);
                if (vmap_pmd_range(pud, addr, next, prot, pages))
                        return -ENOMEM;
        } while (pud++, addr = next, addr != end);
        return 0;
}
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
        pgd_t *pgd;
        unsigned long next;
        unsigned long addr = (unsigned long) area->addr;
        unsigned long end = addr + area->size - PAGE_SIZE;
        int err;

        BUG_ON(addr >= end);
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                err = vmap_pud_range(pgd, addr, next, prot, pages);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
        flush_cache_vmap((unsigned long) area->addr, end);
        return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);
/*
 * Map a vmalloc()-space virtual address to the physical page.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
        unsigned long addr = (unsigned long) vmalloc_addr;
        struct page *page = NULL;
        pgd_t *pgd = pgd_offset_k(addr);

        /*
         * XXX we might need to change this if we add VIRTUAL_BUG_ON for
         * architectures that do not vmalloc module space
         */
        VIRTUAL_BUG_ON(!is_vmalloc_addr(vmalloc_addr) &&
                        !is_module_address(addr));

        if (!pgd_none(*pgd)) {
                pud_t *pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd_t *pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd)) {
                                pte_t *ptep, pte;

                                ptep = pte_offset_map(pmd, addr);
                                pte = *ptep;
                                if (pte_present(pte))
                                        page = pte_page(pte);
                                pte_unmap(ptep);
                        }
                }
        }
        return page;
}
EXPORT_SYMBOL(vmalloc_to_page);
/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
        return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
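
/*
 * Illustrative sketch, not part of the original file: translating an offset
 * inside a vmalloc()ed buffer with the two helpers above.  The function name
 * and the 4-page size are arbitrary examples.
 */
static __maybe_unused void vmalloc_to_page_example(void)
{
        void *buf = vmalloc(4 * PAGE_SIZE);
        struct page *page;

        if (!buf)
                return;

        /* Look up the page backing the second virtual page of the buffer. */
        page = vmalloc_to_page(buf + PAGE_SIZE);
        printk(KERN_DEBUG "%p maps to page %p (pfn %lu)\n",
               buf + PAGE_SIZE, page, vmalloc_to_pfn(buf + PAGE_SIZE));

        vfree(buf);
}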
static struct vm_struct *
__get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start,
                   unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
        struct vm_struct **p, *tmp, *area;
        unsigned long align = 1;
        unsigned long addr;

        BUG_ON(in_interrupt());
        if (flags & VM_IOREMAP) {
                int bit = fls(size);

                if (bit > IOREMAP_MAX_ORDER)
                        bit = IOREMAP_MAX_ORDER;
                else if (bit < PAGE_SHIFT)
                        bit = PAGE_SHIFT;

                align = 1ul << bit;
        }
        addr = ALIGN(start, align);
        size = PAGE_ALIGN(size);
        if (unlikely(!size))
                return NULL;

        area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
        if (unlikely(!area))
                return NULL;

        /*
         * We always allocate a guard page.
         */
        size += PAGE_SIZE;

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
                if ((unsigned long)tmp->addr < addr) {
                        if ((unsigned long)tmp->addr + tmp->size >= addr)
                                addr = ALIGN(tmp->size +
                                             (unsigned long)tmp->addr, align);
                        continue;
                }
                if ((size + addr) < addr)
                        goto out;
                if (size + addr <= (unsigned long)tmp->addr)
                        goto found;
                addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
                if (addr > end - size)
                        goto out;
        }
        if ((size + addr) < addr)
                goto out;
        if (addr > end - size)
                goto out;

found:
        area->next = *p;
        *p = area;

        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size;
        area->pages = NULL;
        area->nr_pages = 0;
        area->phys_addr = 0;
        area->caller = caller;
        write_unlock(&vmlist_lock);

        return area;

out:
        write_unlock(&vmlist_lock);
        kfree(area);
        if (printk_ratelimit())
                printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
        return NULL;
}
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                unsigned long start, unsigned long end)
{
        return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
                                  __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);
/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
                                  -1, GFP_KERNEL, __builtin_return_address(0));
}
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
                                     void *caller)
{
        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
                                  -1, GFP_KERNEL, caller);
}
struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
                                   int node, gfp_t gfp_mask)
{
        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
                                  gfp_mask, __builtin_return_address(0));
}
/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(const void *addr)
{
        struct vm_struct *tmp;

        for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
                if (tmp->addr == addr)
                        break;
        }

        return tmp;
}
/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(const void *addr)
{
        struct vm_struct **p, *tmp;

        for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
                if (tmp->addr == addr)
                        goto found;
        }
        return NULL;

found:
        unmap_vm_area(tmp);
        *p = tmp->next;

        /*
         * Remove the guard page.
         */
        tmp->size -= PAGE_SIZE;
        return tmp;
}
/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
        struct vm_struct *v;

        write_lock(&vmlist_lock);
        v = __remove_vm_area(addr);
        write_unlock(&vmlist_lock);
        return v;
}
static void __vunmap(const void *addr, int deallocate_pages)
{
        struct vm_struct *area;

        if (!addr)
                return;

        if ((PAGE_SIZE-1) & (unsigned long)addr) {
                WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                return;
        }

        area = remove_vm_area(addr);
        if (unlikely(!area)) {
                WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
                                addr);
                return;
        }

        debug_check_no_locks_freed(addr, area->size);
        debug_check_no_obj_freed(addr, area->size);

        if (deallocate_pages) {
                int i;

                for (i = 0; i < area->nr_pages; i++) {
                        struct page *page = area->pages[i];

                        BUG_ON(!page);
                        __free_page(page);
                }

                if (area->flags & VM_VPAGES)
                        vfree(area->pages);
                else
                        kfree(area->pages);
        }

        kfree(area);
        return;
}
/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);
/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot)
{
        struct vm_struct *area;

        if (count > num_physpages)
                return NULL;

        area = get_vm_area_caller((count << PAGE_SHIFT), flags,
                                  __builtin_return_address(0));
        if (!area)
                return NULL;

        if (map_vm_area(area, prot, &pages)) {
                vunmap(area->addr);
                return NULL;
        }

        return area->addr;
}
EXPORT_SYMBOL(vmap);
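
/*
 * Illustrative sketch, not part of the original file: mapping two separately
 * allocated pages into one virtually contiguous range with vmap(), then
 * tearing the mapping down with vunmap().  Names are arbitrary examples.
 */
static __maybe_unused void vmap_example(void)
{
        struct page *pages[2];
        void *va;
        int i;

        for (i = 0; i < 2; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        goto out_free;
        }

        va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
        if (va) {
                /* Both pages are now addressable back to back at va. */
                vunmap(va);     /* unmaps only; the pages stay allocated */
        }

out_free:
        while (--i >= 0)
                __free_page(pages[i]);
}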
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                                 pgprot_t prot, int node, void *caller)
{
        struct page **pages;
        unsigned int nr_pages, array_size, i;

        nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));

        area->nr_pages = nr_pages;
        /* Please note that the recursion is strictly bounded. */
        if (array_size > PAGE_SIZE) {
                pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
                                       PAGE_KERNEL, node, caller);
                area->flags |= VM_VPAGES;
        } else {
                pages = kmalloc_node(array_size,
                                     (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
                                     node);
        }
        area->pages = pages;
        area->caller = caller;
        if (!area->pages) {
                remove_vm_area(area->addr);
                kfree(area);
                return NULL;
        }

        for (i = 0; i < area->nr_pages; i++) {
                struct page *page;

                if (node < 0)
                        page = alloc_page(gfp_mask);
                else
                        page = alloc_pages_node(node, gfp_mask, 0);

                if (unlikely(!page)) {
                        /* Successfully allocated i pages, free them in __vunmap() */
                        area->nr_pages = i;
                        goto fail;
                }
                area->pages[i] = page;
        }

        if (map_vm_area(area, prot, &pages))
                goto fail;
        return area->addr;

fail:
        vfree(area->addr);
        return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
        return __vmalloc_area_node(area, gfp_mask, prot, -1,
                                   __builtin_return_address(0));
}
/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
                            int node, void *caller)
{
        struct vm_struct *area;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;

        area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
                                  node, gfp_mask, caller);
        if (!area)
                return NULL;

        return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
        return __vmalloc_node(size, gfp_mask, prot, -1,
                              __builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);
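
/*
 * Illustrative sketch, not part of the original file: __vmalloc() with an
 * explicit gfp mask and page protection, for callers needing more control
 * than vmalloc() gives.  The function name and size are arbitrary examples.
 */
static __maybe_unused void *__vmalloc_example(void)
{
        /* May sleep, so only blocking gfp flags make sense here. */
        return __vmalloc(16 * PAGE_SIZE,
                         GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
}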
/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
                              -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
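
/*
 * Illustrative sketch, not part of the original file: the usual
 * vmalloc()/vfree() pairing for a large, virtually contiguous buffer.
 * The function name and size are arbitrary examples.
 */
static __maybe_unused void vmalloc_example(void)
{
        char *buf = vmalloc(128 * 1024);        /* may sleep, may use highmem */

        if (!buf)
                return;

        buf[0] = 0;     /* vmalloc() does not zero the pages */
        vfree(buf);     /* must not be called from interrupt context */
}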
/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
        struct vm_struct *area;
        void *ret;

        ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
        if (ret) {
                write_lock(&vmlist_lock);
                area = __find_vm_area(ret);
                area->flags |= VM_USERMAP;
                write_unlock(&vmlist_lock);
        }
        return ret;
}
EXPORT_SYMBOL(vmalloc_user);
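
/*
 * Illustrative sketch, not part of the original file: vmalloc_user() returns
 * zeroed memory with VM_USERMAP set, which is what remap_vmalloc_range()
 * later checks for.  The function name and size are arbitrary examples.
 */
static __maybe_unused void vmalloc_user_example(void)
{
        void *shared = vmalloc_user(8 * PAGE_SIZE);

        if (!shared)
                return;
        /* ... hand to remap_vmalloc_range() from an mmap handler ... */
        vfree(shared);
}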
/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
                              node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);
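
/*
 * Illustrative sketch, not part of the original file: allocating from the
 * caller's local NUMA node.  numa_node_id() is assumed to be visible here
 * (it lives in <linux/topology.h>); the function name is a made-up example.
 */
static __maybe_unused void *vmalloc_node_example(unsigned long size)
{
        return vmalloc_node(size, numa_node_id());
}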
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}
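
/*
 * Illustrative sketch, not part of the original file: a module-loader style
 * user asking for memory it may later write instructions into.  The function
 * name and size are arbitrary examples.
 */
static __maybe_unused void *vmalloc_exec_example(void)
{
        return vmalloc_exec(2 * PAGE_SIZE);     /* freed with vfree() */
}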
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
        return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);
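
/*
 * Illustrative sketch, not part of the original file: a buffer whose backing
 * pages must be addressable with 32bit physical addresses, e.g. so they can
 * be mapped page by page for scatter-gather DMA.  The function name and size
 * are arbitrary examples.
 */
static __maybe_unused void *vmalloc_32_example(void)
{
        /* Pages come from ZONE_DMA32/ZONE_DMA where configured (GFP_VMALLOC32). */
        return vmalloc_32(64 * 1024);
}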
/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
        struct vm_struct *area;
        void *ret;

        ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
        if (ret) {
                write_lock(&vmlist_lock);
                area = __find_vm_area(ret);
                area->flags |= VM_USERMAP;
                write_unlock(&vmlist_lock);
        }
        return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);
long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}
long vwrite(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *addr = *buf;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}
/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * those criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                        unsigned long pgoff)
{
        struct vm_struct *area;
        unsigned long uaddr = vma->vm_start;
        unsigned long usize = vma->vm_end - vma->vm_start;
        int ret;

        if ((PAGE_SIZE-1) & (unsigned long)addr)
                return -EINVAL;

        read_lock(&vmlist_lock);
        area = __find_vm_area(addr);
        if (!area)
                goto out_einval_locked;

        if (!(area->flags & VM_USERMAP))
                goto out_einval_locked;

        if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
                goto out_einval_locked;
        read_unlock(&vmlist_lock);

        addr += pgoff << PAGE_SHIFT;
        do {
                struct page *page = vmalloc_to_page(addr);

                ret = vm_insert_page(vma, uaddr, page);
                if (ret)
                        return ret;

                uaddr += PAGE_SIZE;
                addr += PAGE_SIZE;
                usize -= PAGE_SIZE;
        } while (usize > 0);

        /* Prevent "things" like memory migration? VM_flags need a cleanup... */
        vma->vm_flags |= VM_RESERVED;

        return ret;

out_einval_locked:
        read_unlock(&vmlist_lock);
        return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);
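
/*
 * Illustrative sketch, not part of the original file: a typical ->mmap file
 * operation exposing a vmalloc_user() buffer to userspace through
 * remap_vmalloc_range().  "example_buf" and the handler name are hypothetical.
 */
static void *example_buf;       /* assumed to come from vmalloc_user() */

static __maybe_unused int example_mmap(struct file *file,
                                       struct vm_area_struct *vma)
{
        /* vm_pgoff says how many pages into the buffer the mapping starts. */
        return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}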
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}
static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
        /* apply_to_page_range() does all the hard work. */
        return 0;
}
/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range.  No actual mappings
 * are created.  If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
        struct vm_struct *area;

        area = get_vm_area_caller(size, VM_IOREMAP,
                                  __builtin_return_address(0));
        if (area == NULL)
                return NULL;

        /*
         * This ensures that page tables are constructed for this region
         * of kernel virtual address space and mapped into init_mm.
         */
        if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
                                area->size, f, NULL)) {
                free_vm_area(area);
                return NULL;
        }

        /* Make sure the pagetables are constructed in process kernel
           mappings */
        vmalloc_sync_all();

        return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
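
/*
 * Illustrative sketch, not part of the original file: reserving a
 * pagetable-backed range with alloc_vm_area() (as a hypervisor shared-memory
 * user might) and releasing it with free_vm_area().  The function name is a
 * made-up example.
 */
static __maybe_unused void alloc_vm_area_example(void)
{
        struct vm_struct *vm = alloc_vm_area(PAGE_SIZE);

        if (!vm)
                return;

        /* Page tables for vm->addr now exist; a hypercall or explicit pte
         * setup could populate the range here. */

        free_vm_area(vm);
}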
void free_vm_area(struct vm_struct *area)
{
        struct vm_struct *ret;

        ret = remove_vm_area(area->addr);
        BUG_ON(ret != area);
        kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
        loff_t n = *pos;
        struct vm_struct *v;

        read_lock(&vmlist_lock);
        v = vmlist;
        while (n > 0 && v) {
                n--;
                v = v->next;
        }
        if (!n)
                return v;

        return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
        struct vm_struct *v = p;

        ++*pos;
        return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
        read_unlock(&vmlist_lock);
}
static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
        if (NUMA_BUILD) {
                unsigned int nr, *counters = m->private;

                if (!counters)
                        return;

                memset(counters, 0, nr_node_ids * sizeof(unsigned int));

                for (nr = 0; nr < v->nr_pages; nr++)
                        counters[page_to_nid(v->pages[nr])]++;

                for_each_node_state(nr, N_HIGH_MEMORY)
                        if (counters[nr])
                                seq_printf(m, " N%u=%u", nr, counters[nr]);
        }
}
static int s_show(struct seq_file *m, void *p)
{
        struct vm_struct *v = p;

        seq_printf(m, "0x%p-0x%p %7ld",
                v->addr, v->addr + v->size, v->size);

        if (v->caller) {
                char buff[2 * KSYM_NAME_LEN];

                seq_putc(m, ' ');
                sprint_symbol(buff, (unsigned long)v->caller);
                seq_puts(m, buff);
        }

        if (v->nr_pages)
                seq_printf(m, " pages=%d", v->nr_pages);

        if (v->phys_addr)
                seq_printf(m, " phys=%lx", v->phys_addr);

        if (v->flags & VM_IOREMAP)
                seq_printf(m, " ioremap");

        if (v->flags & VM_ALLOC)
                seq_printf(m, " vmalloc");

        if (v->flags & VM_MAP)
                seq_printf(m, " vmap");

        if (v->flags & VM_USERMAP)
                seq_printf(m, " user");

        if (v->flags & VM_VPAGES)
                seq_printf(m, " vpages");

        show_numa_info(m, v);
        seq_putc(m, '\n');
        return 0;
}

const struct seq_operations vmalloc_op = {
        .start = s_start,
        .next = s_next,
        .stop = s_stop,
        .show = s_show,
};

#endif