/*
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
/*** Page table manipulation functions ***/
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}
static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}
static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}
static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}
static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}
/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range(unsigned long addr, unsigned long end,
				pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap(addr, end);

	if (unlikely(err))
		return err;
	return nr;
}
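
/*
 * Illustrative sketch (not part of the original file): how a caller that
 * already holds an array of pages might drive vmap_page_range(). The helper
 * name map_two_pages() is hypothetical; get_vm_area()/free_vm_area() are the
 * real interfaces defined later in this file.
 */
#if 0
static void *map_two_pages(struct page *pages[2], pgprot_t prot)
{
	struct vm_struct *area;

	/* reserve 2 pages of kva (plus the implicit guard page) */
	area = get_vm_area(2 * PAGE_SIZE, VM_MAP);
	if (!area)
		return NULL;

	/* after this, the pte at addr + N*PAGE_SIZE points at pages[N] */
	if (vmap_page_range((unsigned long)area->addr,
			(unsigned long)area->addr + 2 * PAGE_SIZE,
			prot, pages) < 0) {
		free_vm_area(area);
		return NULL;
	}
	return area->addr;
}
#endif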
static inline int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);
/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
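
/*
 * Illustrative sketch (not part of the original file): translating one page
 * of a vmalloc()ed buffer back to its struct page and pfn. The helper name
 * is hypothetical.
 */
#if 0
static void show_backing_pfn(void)
{
	void *buf = vmalloc(PAGE_SIZE);

	if (buf) {
		struct page *page = vmalloc_to_page(buf);
		unsigned long pfn = vmalloc_to_pfn(buf);

		printk(KERN_DEBUG "vmalloc %p -> page %p, pfn %lu\n",
				buf, page, pfn);
		vfree(buf);
	}
}
#endif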
/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);
static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}
static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;

		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}
static void purge_vmap_area_lazy(void);
/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;

	BUG_ON(size & ~PAGE_MASK);

	addr = ALIGN(vstart, align);

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	spin_lock(&vmap_area_lock);
	/* XXX: could have a last_hole cache */
	n = vmap_area_root.rb_node;
	if (n) {
		struct vmap_area *first = NULL;

		do {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				if (!first && tmp->va_start < addr + size)
					first = tmp;
				n = n->rb_left;
			} else {
				first = tmp;
				n = n->rb_right;
			}
		} while (n);

		if (!first)
			goto found;

		if (first->va_end < addr) {
			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}

		while (addr + size >= first->va_start && addr + size <= vend) {
			addr = ALIGN(first->va_end + PAGE_SIZE, align);

			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}
	}
found:
	if (addr + size > vend) {
		spin_unlock(&vmap_area_lock);
		if (!purged) {
			purge_vmap_area_lazy();
			purged = 1;
			goto retry;
		}
		if (printk_ratelimit())
			printk(KERN_WARNING "vmap allocation failed: "
				"use vmalloc=<size> to increase size.\n");
		return ERR_PTR(-EBUSY);
	}

	BUG_ON(addr & (align-1));

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	spin_unlock(&vmap_area_lock);

	return va;
}
static void rcu_free_va(struct rcu_head *head)
{
	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);

	kfree(va);
}
static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	call_rcu(&va->rcu_head, rcu_free_va);
}
/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}
/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}
/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned long log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
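
/*
 * Worked example (illustrative, assuming 4K pages): with 4 online CPUs,
 * fls(4) == 3, so lazy_max_pages() == 3 * (32MB / 4KB) == 24576 pages,
 * i.e. up to ~96MB of stale kva may accumulate before a purge. With 64
 * CPUs, fls(64) == 7 gives ~224MB, rather than the 2GB a linear scale
 * would allow.
 */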
static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			unmap_vmap_area(va);
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr) {
		BUG_ON(nr > atomic_read(&vmap_lazy_nr));
		atomic_sub(nr, &vmap_lazy_nr);
	}

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry(va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}
/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}
/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		purge_vmap_area_lazy();
}
static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}
static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}
/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE	(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
						VMALLOC_PAGES / NR_CPUS / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
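
/*
 * Worked example (illustrative, assuming 4K pages and NR_CPUS == 4 on a
 * 32-bit build): VMALLOC_PAGES == 128MB/4KB == 32768, so
 * VMALLOC_PAGES / NR_CPUS / 16 == 512. Clamped between
 * VMAP_BBMAP_BITS_MIN (64) and VMAP_BBMAP_BITS_MAX (1024) this gives
 * VMAP_BBMAP_BITS == 512, i.e. VMAP_BLOCK_SIZE == 2MB per block.
 */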
struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
	struct list_head dirty;
	unsigned int nr_dirty;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	union {
		struct {
			struct list_head free_list;
			struct list_head dirty_list;
		};
		struct rcu_head rcu_head;
	};
};
/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}
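
/*
 * Worked example (illustrative): with VMAP_BLOCK_SIZE == 2MB and
 * VMALLOC_START aligned down to a 2MB boundary, an address 5MB into the
 * vmalloc space yields addr_to_vb_idx() == 2, i.e. the third vmap block.
 */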
static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (unlikely(IS_ERR(va))) {
		kfree(vb);
		return ERR_PTR(PTR_ERR(va));
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);
	INIT_LIST_HEAD(&vb->dirty_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_cpu_blocks);

	return vb;
}
static void rcu_free_vb(struct rcu_head *head)
{
	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);

	kfree(vb);
}
static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	spin_lock(&vb->vbq->lock);
	if (!list_empty(&vb->free_list))
		list_del(&vb->free_list);
	if (!list_empty(&vb->dirty_list))
		list_del(&vb->dirty_list);
	spin_unlock(&vb->vbq->lock);

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_unmap_vmap_area(vb->va);
	call_rcu(&vb->rcu_head, rcu_free_vb);
}
static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i >= 0) {
			addr = vb->va->va_start + (i << PAGE_SHIFT);
			BUG_ON(addr_to_vb_idx(addr) !=
					addr_to_vb_idx(vb->va->va_start));
			vb->free -= 1UL << order;
			if (vb->free == 0) {
				spin_lock(&vbq->lock);
				list_del_init(&vb->free_list);
				spin_unlock(&vbq->lock);
			}
			spin_unlock(&vb->lock);
			break;
		}
		spin_unlock(&vb->lock);
	}
	put_cpu_var(vmap_cpu_blocks);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}
static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	spin_lock(&vb->lock);
	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
	if (!vb->dirty) {
		spin_lock(&vb->vbq->lock);
		list_add(&vb->dirty_list, &vb->vbq->dirty);
		spin_unlock(&vb->vbq->lock);
	}
	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free || !list_empty(&vb->free_list));
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}
/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				vunmap_page_range(s, e);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
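
/*
 * Illustrative sketch (not part of the original file): a caller about to
 * change attributes of pages it owns can use vm_unmap_aliases() to make
 * sure no stale lazy kernel aliases remain. The function name is
 * hypothetical.
 */
#if 0
static void prepare_pages_for_attribute_change(void)
{
	/* drop any lazily kept vmap aliases (and their TLB entries) first */
	vm_unmap_aliases();
	/* ... then go on to change the page attributes ... */
}
#endif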
/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);
/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 * @returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
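
/*
 * Illustrative sketch (not part of the original file): mapping a small
 * array of pages with vm_map_ram() and tearing the mapping down again.
 * The helper name and its arguments are hypothetical.
 */
#if 0
static void map_and_zero(struct page **pages, unsigned int count)
{
	void *p = vm_map_ram(pages, count, -1, PAGE_KERNEL);

	if (p) {
		memset(p, 0, count * PAGE_SIZE);
		vm_unmap_ram(p, count);		/* must pass the same count */
	}
}
#endif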
void __init vmalloc_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		INIT_LIST_HEAD(&vbq->dirty);
		vbq->nr_dirty = 0;
	}
}
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);
/*** Old vmalloc interfaces ***/
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;
static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long flags, unsigned long start, unsigned long end,
		int node, gfp_t gfp_mask, void *caller)
{
	static struct vmap_area *va;
	struct vm_struct *area;
	struct vm_struct *tmp, **p;
	unsigned long align = 1;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	area->flags = flags;
	area->addr = (void *)va->va_start;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	area->caller = caller;
	va->private = area;
	va->flags |= VM_VM_AREA;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= area->addr)
			break;
	}
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);

	return area;
}
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
						__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);
/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size:	size of the area
 * @flags:	%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, caller);
}
struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				gfp_mask, __builtin_return_address(0));
}
static struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->private;

	return NULL;
}
/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr:	base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->private;
		struct vm_struct *tmp, **p;
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		write_lock(&vmlist_lock);
		for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
			;
		*p = tmp->next;
		write_unlock(&vmlist_lock);

		return vm;
	}
	return NULL;
}
static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}
/**
 * vfree - release memory allocated by vmalloc()
 * @addr:	memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr:	memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);
/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages:	array of page pointers
 * @count:	number of pages to map
 * @flags:	vm_area->flags
 * @prot:	page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
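
/*
 * Illustrative sketch (not part of the original file): making two physically
 * discontiguous pages appear virtually contiguous with vmap(). The helper
 * name is hypothetical; the mapping is released with vunmap().
 */
#if 0
static void *vmap_two_pages(struct page *a, struct page *b)
{
	struct page *pages[2] = { a, b };

	return vmap(pages, 2, VM_MAP, PAGE_KERNEL);
}
#endif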
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}
void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1,
					__builtin_return_address(0));
}
/**
 * __vmalloc_node  -  allocate virtually contiguous memory
 * @size:	allocation size
 * @gfp_mask:	flags for the page level allocator
 * @prot:	protection mask for the allocated pages
 * @node:	node to use for allocation or -1
 * @caller:	caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
						int node, void *caller)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
						node, gfp_mask, caller);

	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);
/**
 * vmalloc  -  allocate virtually contiguous memory
 * @size:	allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
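
/*
 * Illustrative sketch (not part of the original file): a typical
 * vmalloc()/vfree() pairing for a buffer too large for kmalloc(). The
 * helper name and size are arbitrary.
 */
#if 0
static int build_big_table(void)
{
	unsigned int *table = vmalloc(1024 * 1024 * sizeof(*table));

	if (!table)
		return -ENOMEM;
	/* ... use the ~4MB virtually contiguous buffer ... */
	vfree(table);
	return 0;
}
#endif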
/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size:	allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);
/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec  -  allocate virtually contiguous, executable memory
 * @size:	allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif
/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 * @size:	allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);
/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size:	allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
/**
 * remap_vmalloc_range  -  map vmalloc pages to userspace
 * @vma:		vma to cover (map full range of vma)
 * @addr:		vmalloc memory
 * @pgoff:		number of pages into addr before first page to map
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criteria isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	area = find_vm_area(addr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		return -EINVAL;

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);
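
/*
 * Illustrative sketch (not part of the original file): a character device
 * mmap() handler exposing a vmalloc_user() buffer to userspace. my_buf is
 * hypothetical and assumed to have been allocated with vmalloc_user()
 * (which sets VM_USERMAP).
 */
#if 0
static void *my_buf;	/* = vmalloc_user(MY_BUF_SIZE) at init time */

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
}
#endif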
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}
static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}
/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size:	size of the area
 *
 * Returns:	NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range. No actual mappings
 * are created. If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
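
/*
 * Illustrative sketch (not part of the original file): reserving a page of
 * kernel address space without backing pages, in the style of a hypervisor
 * shared-page user, then releasing it. Names are hypothetical.
 */
#if 0
static struct vm_struct *my_area;

static int reserve_shared_page(void)
{
	my_area = alloc_vm_area(PAGE_SIZE);
	if (!my_area)
		return -ENOMEM;
	/* ... hand my_area->addr to the hypervisor to populate ... */
	return 0;
}

static void release_shared_page(void)
{
	free_vm_area(my_area);
}
#endif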
#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;
}
static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}
static void s_stop(struct seq_file *m, void *p)
{
	read_unlock(&vmlist_lock);
}
static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (NUMA_BUILD) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}
static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%p-0x%p %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller) {
		char buff[2 * KSYM_NAME_LEN];

		seq_putc(m, ' ');
		sprint_symbol(buff, (unsigned long)v->caller);
		seq_puts(m, buff);
	}

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%lx", v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start	= s_start,
	.next	= s_next,
	.stop	= s_stop,
	.show	= s_show,
};
static int vmalloc_open(struct inode *inode, struct file *file)
{
	unsigned int *ptr = NULL;
	int ret;

	if (NUMA_BUILD)
		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
	ret = seq_open(file, &vmalloc_op);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = ptr;
	} else
		kfree(ptr);
	return ret;
}
static const struct file_operations proc_vmalloc_operations = {
	.open		= vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);
#endif