/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>

rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct *vmlist;
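
/*
 * Editorial note: vmlist is a singly linked, address-ordered list of
 * vm_struct descriptors protected by vmlist_lock.  The static helpers
 * below walk the kernel page tables (pgd -> pmd -> pte) to tear down or
 * install the mappings that back each area.
 */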

static void unmap_area_pte(pmd_t *pmd, unsigned long address,
			   unsigned long size)
{
	unsigned long end;
	pte_t *pte, page;

	/* ... */

	pte = pte_offset_kernel(pmd, address);

	/* ... */

	do {
		page = ptep_get_and_clear(pte);

		/* ... */

		if (pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (address < end);
}

static void unmap_area_pmd(pgd_t *dir, unsigned long address,
			   unsigned long size)
{
	unsigned long end;
	pmd_t *pmd;

	/* ... */

	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;

	/* ... */

	do {
		unmap_area_pte(pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}

static int map_area_pte(pte_t *pte, unsigned long address,
			unsigned long size, pgprot_t prot,
			struct page ***pages)
{
	unsigned long end;

	/* ... */

	do {
		struct page *page = **pages;

		WARN_ON(!pte_none(*pte));

		/* ... */

		set_pte(pte, mk_pte(page, prot));
		address += PAGE_SIZE;
		pte++;
		(*pages)++;
	} while (address < end);

	return 0;
}

static int map_area_pmd(pmd_t *pmd, unsigned long address,
			unsigned long size, pgprot_t prot,
			struct page ***pages)
{
	unsigned long base, end;

	base = address & PGDIR_MASK;
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	do {
		pte_t *pte = pte_alloc_kernel(&init_mm, pmd, base + address);
		if (!pte)
			return -ENOMEM;
		if (map_area_pte(pte, address, end - address, prot, pages))
			return -ENOMEM;
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);

	return 0;
}

void unmap_vm_area(struct vm_struct *area)
{
	unsigned long address = (unsigned long) area->addr;
	unsigned long end = (address + area->size);
	pgd_t *dir;

	dir = pgd_offset_k(address);
	flush_cache_vunmap(address, end);
	do {
		unmap_area_pmd(dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long address = (unsigned long) area->addr;
	unsigned long end = address + (area->size - PAGE_SIZE);
	pgd_t *dir;
	int err = 0;

	dir = pgd_offset_k(address);
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
		if (!pmd) {
			err = -ENOMEM;
			break;
		}
		if (map_area_pmd(pmd, address, end - address, prot, pages)) {
			err = -ENOMEM;
			break;
		}

		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	spin_unlock(&init_mm.page_table_lock);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
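
/*
 * Editorial note: map_vm_area() maps one PAGE_SIZE less than area->size
 * because __get_vm_area() below always reserves an extra, unmapped guard
 * page at the end of each area ("We always allocate a guard page"), so a
 * sequential overrun faults instead of silently running into the next
 * mapping.
 */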

#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
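
/*
 * Editorial note: for VM_IOREMAP areas, __get_vm_area() below derives the
 * alignment from the allocation size and clamps it between PAGE_SHIFT and
 * IOREMAP_MAX_ORDER, i.e. ioremap mappings are never aligned to more than
 * 128 pages.
 */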

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;
	if (unlikely(!size)) {
		kfree(area);
		return NULL;
	}

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	/* ... link the new area in at *p and initialise its fields ... */
	area->addr = (void *)addr;
	area->size = size;
	write_unlock(&vmlist_lock);
	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}
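
/*
 * Illustrative sketch (not from this file): architecture ioremap()
 * implementations typically pair get_vm_area() with their own page-table
 * remapping helper; the helper name below is an assumption:
 *
 *	struct vm_struct *area;
 *
 *	area = get_vm_area(size, VM_IOREMAP);
 *	if (!area)
 *		return NULL;
 *	if (arch_remap_area_pages((unsigned long)area->addr, phys_addr,
 *				  size, flags)) {
 *		vunmap(area->addr);
 *		return NULL;
 *	}
 *	return area->addr;
 */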

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	write_unlock(&vmlist_lock);
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;
	write_unlock(&vmlist_lock);
	return tmp;
}

void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			if (unlikely(!area->pages[i]))
				BUG();
			__free_page(area->pages[i]);
		}

		kfree(area->pages);
	}

	kfree(area);
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc().
 *
 *	May not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}

EXPORT_SYMBOL(vfree);
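
/*
 * Illustrative usage (not part of the original file): the usual
 * vmalloc()/vfree() pairing in driver code looks roughly like this:
 *
 *	void *buf = vmalloc(64 * 1024);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 *
 * Unlike kmalloc()/kfree(), the buffer is only virtually contiguous and
 * must not be freed from interrupt context (see the BUG_ON above).
 */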

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	May not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}

EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
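
/*
 * Illustrative usage (not part of the original file): map two already
 * allocated pages and later drop only the mapping; the pages themselves
 * stay allocated.  The VM_MAP flag is an assumption -- use whatever
 * vm_struct flag is appropriate in the caller's tree.
 *
 *	struct page *pg[2];
 *	void *va;
 *
 *	pg[0] = alloc_page(GFP_KERNEL);
 *	pg[1] = alloc_page(GFP_KERNEL);
 *	va = vmap(pg, 2, VM_MAP, PAGE_KERNEL);
 *	if (va) {
 *		...
 *		vunmap(va);
 *	}
 *	__free_page(pg[0]);
 *	__free_page(pg[1]);
 */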

/**
 *	__vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
{
	struct vm_struct *area;
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area(size, VM_ALLOC);
	if (!area)
		return NULL;

	nr_pages = size >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		area->pages[i] = alloc_page(gfp_mask);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

EXPORT_SYMBOL(__vmalloc);
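
/*
 * Editorial note: the exported wrappers below differ only in the gfp_mask
 * and pgprot they hand to __vmalloc().  A caller needing something more
 * exotic, e.g. a read-only mapping (assuming the architecture provides
 * PAGE_KERNEL_RO), would call __vmalloc() directly:
 *
 *	p = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_RO);
 */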

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc);
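
/*
 * Editorial note: vmalloc() passes __GFP_HIGHMEM because the pages are
 * only ever accessed through the new kernel mapping established by
 * map_vm_area(), so they may safely come from highmem.
 */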

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}
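
/*
 * Editorial note: on architectures without a separate executable kernel
 * page protection, PAGE_KERNEL_EXEC falls back to plain PAGE_KERNEL above,
 * making vmalloc_exec() equivalent to vmalloc().
 */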

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc_32);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			/* ... zero-fill the gap below this area ... */
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		/* ... copy up to n bytes out of this area ... */
	}

	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
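
/*
 * Editorial note: vread() presents the whole vmalloc range as one flat
 * image; bytes that fall between areas are returned as '\0'.  This is what
 * callers such as the /dev/kmem read path rely on.
 */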

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			/* ... skip the gap below this area ... */
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		/* ... copy up to n bytes into this area ... */
	}

	read_unlock(&vmlist_lock);
	return buf - buf_start;
}