/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/vmalloc.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>

DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);
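/*
 * Page-table teardown helpers: walk the kernel page tables for the range
 * [addr, end) level by level (pte, pmd, pud) and clear every entry.
 * They are used when a vmalloc/vmap area is unmapped.
 */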
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long end = addr + size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);
}

static void unmap_vm_area(struct vm_struct *area)
{
	unmap_kernel_range((unsigned long)area->addr, area->size);
}
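/*
 * Page-table population helpers: the vmap_*_range() functions walk (and,
 * where necessary, allocate) the kernel page tables for [addr, end) and
 * install one page from the *pages array at each PAGE_SIZE step.
 */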
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}
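/*
 * map_vm_area - map the given pages into the virtual range covered by
 * @area (excluding the trailing guard page) with protection @prot.  On
 * success the *pages cursor has been advanced past the pages that were
 * mapped.
 */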
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*
 * Map a vmalloc()-space virtual address to the physical page.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
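/*
 * Allocate a vm_struct and find a free stretch of kernel virtual address
 * space for it between @start and @end.  The vmlist is kept sorted by
 * address and is scanned first-fit under vmlist_lock; one extra guard
 * page is always reserved at the end of the area.
 */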
static struct vm_struct *
__get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);

	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}
	if ((size + addr) < addr)
		goto out;
	if (addr > end - size)
		goto out;

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	area->caller = caller;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
						__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);
/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
						-1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask, __builtin_return_address(0));
}
/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(const void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)
			break;
	}

	return tmp;
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(const void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}
/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}
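/*
 * Common backend for vfree() and vunmap(): remove the area from the
 * vmlist, tear down its page-table entries and, if @deallocate_pages is
 * set, return the backing pages to the page allocator and free the page
 * array.  The vm_struct itself is always freed.
 */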
static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}
/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);
/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
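/*
 * Illustrative sketch (not part of the original file): a typical vmap()
 * user maps an existing page array and later releases the mapping, e.g.
 *
 *	void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 *
 * where "pages" and "nr_pages" are hypothetical caller-owned variables.
 */

/*
 * Allocate the physical pages backing @area one at a time (from @node if
 * it is >= 0) and map them into the area's virtual range.  The page
 * array itself is kmalloc()ed, or vmalloc()ed when it would not fit in a
 * single page; that recursion is bounded because the nested allocation
 * is much smaller than the original request.
 */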
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1,
					__builtin_return_address(0));
}
/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
						int node, void *caller)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
						node, gfp_mask, caller);

	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);
/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
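/*
 * Illustrative sketch (not part of the original file): vmalloc() is
 * normally paired with vfree(), e.g.
 *
 *	buf = vmalloc(count * sizeof(*buf));
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 *
 * where "buf" and "count" are hypothetical caller-owned variables.
 */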
/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);
/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);
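/*
 * vread()/vwrite() copy data between a caller-supplied buffer and the
 * vmalloc address range (historically used by /dev/kmem).  Gaps between
 * areas are zero-filled on read and skipped on write, and the trailing
 * guard page of each area is never touched.
 */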
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
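/*
 * Illustrative sketch (not part of the original file): a driver's mmap
 * handler can expose a buffer obtained from vmalloc_user() (which sets
 * VM_USERMAP) via remap_vmalloc_range(), e.g.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, my_buf, 0);
 *	}
 *
 * where "my_mmap" and "my_buf" are hypothetical names.
 */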
/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Failure is returned if
 * these criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;

out_einval_locked:
	read_unlock(&vmlist_lock);
	return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}
/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range. No actual mappings
 * are created. If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
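/*
 * seq_file operations behind /proc/vmallocinfo: walk the vmlist under
 * vmlist_lock and print one line per vm_struct (address range, size,
 * caller, page count, physical address and flags, plus a per-node page
 * breakdown on NUMA builds).
 */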
#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
	read_unlock(&vmlist_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (NUMA_BUILD) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%p-0x%p %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller) {
		char buff[2 * KSYM_NAME_LEN];

		seq_putc(m, ' ');
		sprint_symbol(buff, (unsigned long)v->caller);
		seq_puts(m, buff);
	}

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%lx", v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};
#endif