/*
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct *vmlist;
static void unmap_area_pte(pmd_t *pmd, unsigned long address,
                           unsigned long size)
{
        unsigned long end;
        pte_t *pte;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }

        pte = pte_offset_kernel(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;

        do {
                pte_t page;
                page = ptep_get_and_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page))
                        continue;
                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
        } while (address < end);
}
static void unmap_area_pmd(pgd_t *dir, unsigned long address,
                           unsigned long size)
{
        unsigned long end;
        pmd_t *pmd;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }

        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        do {
                unmap_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}
static int map_area_pte(pte_t *pte, unsigned long address,
                        unsigned long size, pgprot_t prot,
                        struct page ***pages)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;

        do {
                struct page *page = **pages;

                WARN_ON(!pte_none(*pte));
                if (!page)
                        return -ENOMEM;

                set_pte(pte, mk_pte(page, prot));
                address += PAGE_SIZE;
                pte++;
                (*pages)++;
        } while (address < end);
        return 0;
}
static int map_area_pmd(pmd_t *pmd, unsigned long address,
                        unsigned long size, pgprot_t prot,
                        struct page ***pages)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        do {
                pte_t *pte = pte_alloc_kernel(&init_mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                if (map_area_pte(pte, address, end - address, prot, pages))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);

        return 0;
}
void unmap_vm_area(struct vm_struct *area)
{
        unsigned long address = (unsigned long) area->addr;
        unsigned long end = (address + area->size);
        pgd_t *dir;

        dir = pgd_offset_k(address);
        flush_cache_vunmap(address, end);
        do {
                unmap_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_kernel_range((unsigned long) area->addr, end);
}
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
        unsigned long address = (unsigned long) area->addr;
        /* area->size includes the trailing guard page, which is never mapped */
        unsigned long end = address + (area->size - PAGE_SIZE);
        pgd_t *dir;
        int err = 0;

        dir = pgd_offset_k(address);
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
                if (!pmd) {
                        err = -ENOMEM;
                        break;
                }
                if (map_area_pmd(pmd, address, end - address, prot, pages)) {
                        err = -ENOMEM;
                        break;
                }

                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));

        spin_unlock(&init_mm.page_table_lock);
        flush_cache_vmap((unsigned long) area->addr, end);
        return err;
}
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                unsigned long start, unsigned long end)
{
        struct vm_struct **p, *tmp, *area;
        unsigned long addr = start;

        area = kmalloc(sizeof(*area), GFP_KERNEL);
        if (unlikely(!area))
                return NULL;

        /*
         * We always allocate a guard page.
         */
        size += PAGE_SIZE;
        if (unlikely(!size)) {
                kfree(area);
                return NULL;
        }

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
                if ((unsigned long)tmp->addr < addr)
                        continue;
                if ((size + addr) < addr)
                        goto out;
                if (size + addr <= (unsigned long)tmp->addr)
                        goto found;
                addr = tmp->size + (unsigned long)tmp->addr;
                if (addr > end - size)
                        goto out;
        }

found:
        area->next = *p;
        *p = area;

        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size;
        area->pages = NULL;
        area->nr_pages = 0;
        area->phys_addr = 0;
        write_unlock(&vmlist_lock);

        return area;

out:
        write_unlock(&vmlist_lock);
        kfree(area);
        return NULL;
}
/**
 *      get_vm_area  -  reserve a contiguous kernel virtual area
 *
 *      @size:          size of the area
 *      @flags:         %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *      Search an area of @size in the kernel virtual mapping area,
 *      and reserve it for our purposes.  Returns the area descriptor
 *      on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
        return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}
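
/*
 * Illustrative sketch, not part of this file: how an ioremap()-style
 * caller might pair get_vm_area() with remove_vm_area() (defined below).
 * The function name and the teardown placement are hypothetical.
 */
#if 0
static void example_reserve_and_release(unsigned long size)
{
        struct vm_struct *area;

        /* Reserve size bytes of kernel virtual address space
           (plus the implicit guard page). */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return;

        /* ... install page table entries covering area->addr here ... */

        /* Teardown: remove_vm_area() unmaps and unlinks the area;
           the caller owns (and must free) the descriptor. */
        area = remove_vm_area(area->addr);
        kfree(area);
}
#endif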
/**
 *      remove_vm_area  -  find and remove a contiguous kernel virtual area
 *
 *      @addr:          base address
 *
 *      Search for the kernel VM area starting at @addr, and remove it.
 *      This function returns the found VM area, but using it is NOT safe
 *      on SMP machines, except for its size field.
 */
struct vm_struct *remove_vm_area(void *addr)
{
        struct vm_struct **p, *tmp;

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
                if (tmp->addr == addr)
                        goto found;
        }
        write_unlock(&vmlist_lock);
        return NULL;

found:
        unmap_vm_area(tmp);
        *p = tmp->next;
        write_unlock(&vmlist_lock);
        return tmp;
}
void __vunmap(void *addr, int deallocate_pages)
{
        struct vm_struct *area;

        if (!addr)
                return;

        if ((PAGE_SIZE-1) & (unsigned long)addr) {
                printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                WARN_ON(1);
                return;
        }

        area = remove_vm_area(addr);
        if (unlikely(!area)) {
                printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
                                addr);
                WARN_ON(1);
                return;
        }

        if (deallocate_pages) {
                int i;

                for (i = 0; i < area->nr_pages; i++) {
                        if (unlikely(!area->pages[i]))
                                BUG();
                        __free_page(area->pages[i]);
                }

                kfree(area->pages);
        }

        kfree(area);
}
/**
 *      vfree  -  release memory allocated by vmalloc()
 *
 *      @addr:          memory base address
 *
 *      Free the virtually contiguous memory area starting at @addr, as
 *      obtained from vmalloc(), vmalloc_32() or __vmalloc().
 *
 *      May not be called in interrupt context.
 */
void vfree(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 1);
}

EXPORT_SYMBOL(vfree);
/**
 *      vunmap  -  release virtual mapping obtained by vmap()
 *
 *      @addr:          memory base address
 *
 *      Free the virtually contiguous memory area starting at @addr,
 *      which was created from the page array passed to vmap().
 *
 *      May not be called in interrupt context.
 */
void vunmap(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 0);
}

EXPORT_SYMBOL(vunmap);
/**
 *      vmap  -  map an array of pages into virtually contiguous space
 *
 *      @pages:         array of page pointers
 *      @count:         number of pages to map
 *      @flags:         vm_area->flags
 *      @prot:          page protection for the mapping
 *
 *      Maps @count pages from @pages into contiguous kernel virtual
 *      space.
 */
void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot)
{
        struct vm_struct *area;

        if (count > num_physpages)
                return NULL;

        area = get_vm_area((count << PAGE_SHIFT), flags);
        if (!area)
                return NULL;
        if (map_vm_area(area, prot, &pages)) {
                vunmap(area->addr);
                return NULL;
        }

        return area->addr;
}

EXPORT_SYMBOL(vmap);
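
/*
 * Illustrative sketch, not part of this file: map two physically
 * discontiguous pages into one virtually contiguous range with vmap(),
 * then tear the mapping down with vunmap().  The function name is
 * hypothetical; note that vunmap() does not free the underlying pages.
 */
#if 0
static void vmap_example(void)
{
        struct page *pages[2];
        void *vaddr;

        pages[0] = alloc_page(GFP_KERNEL);
        pages[1] = alloc_page(GFP_KERNEL);
        if (!pages[0] || !pages[1])
                goto out;

        vaddr = vmap(pages, 2, VM_ALLOC, PAGE_KERNEL);
        if (vaddr) {
                memset(vaddr, 0, 2 * PAGE_SIZE);  /* one pointer, both pages */
                vunmap(vaddr);                    /* pages remain allocated */
        }
out:
        if (pages[0])
                __free_page(pages[0]);
        if (pages[1])
                __free_page(pages[1]);
}
#endif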
/**
 *      __vmalloc  -  allocate virtually contiguous memory
 *
 *      @size:          allocation size
 *      @gfp_mask:      flags for the page level allocator
 *      @prot:          protection mask for the allocated pages
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator with @gfp_mask flags.  Map them into contiguous
 *      kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
{
        struct vm_struct *area;
        struct page **pages;
        unsigned int nr_pages, array_size, i;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;

        area = get_vm_area(size, VM_ALLOC);
        if (!area)
                return NULL;

        nr_pages = size >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));

        area->nr_pages = nr_pages;
        area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
        if (!area->pages) {
                remove_vm_area(area->addr);
                kfree(area);
                return NULL;
        }
        memset(area->pages, 0, array_size);

        for (i = 0; i < area->nr_pages; i++) {
                area->pages[i] = alloc_page(gfp_mask);
                if (unlikely(!area->pages[i])) {
                        /* Successfully allocated i pages, free them in __vunmap() */
                        area->nr_pages = i;
                        goto fail;
                }
        }

        if (map_vm_area(area, prot, &pages))
                goto fail;
        return area->addr;

fail:
        vfree(area->addr);
        return NULL;
}

EXPORT_SYMBOL(__vmalloc);
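
/*
 * Illustrative sketch, not part of this file: __vmalloc() with an
 * explicit gfp_mask.  Leaving out __GFP_HIGHMEM keeps every backing
 * page in the directly mapped zones, which is exactly how vmalloc_32()
 * below is implemented.  The function name is hypothetical.
 */
#if 0
static void *example_lowmem_alloc(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
#endif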
/**
 *      vmalloc  -  allocate virtually contiguous memory
 *
 *      @size:          allocation size
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator and map them into contiguous kernel virtual space.
 *
 *      For tight control over the page level allocator and protection flags
 *      use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc);
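
/*
 * Illustrative sketch, not part of this file: a large buffer that is
 * virtually contiguous but needs no physically contiguous memory.
 * The function name and size are hypothetical; vfree() may not be
 * called from interrupt context.
 */
#if 0
static int vmalloc_example(void)
{
        char *buf = vmalloc(64 * 1024);

        if (!buf)
                return -ENOMEM;
        memset(buf, 0, 64 * 1024);
        vfree(buf);
        return 0;
}
#endif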
/**
 *      vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *
 *      @size:          allocation size
 *
 *      Allocate enough 32-bit physically addressable pages to cover @size
 *      from the page level allocator and map them into contiguous kernel
 *      virtual space.
 */
void *vmalloc_32(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc_32);
long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}
long vwrite(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *addr = *buf;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}
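
/*
 * Illustrative sketch, not part of this file: vread()/vwrite() walk the
 * vmlist under the read lock and tolerate ranges that only partially
 * overlap mapped areas; holes read back as zero bytes, and writes to
 * holes are skipped.  Names below are hypothetical.
 */
#if 0
static long example_copy_out(char *kbuf, char *vmalloc_addr, unsigned long len)
{
        /* Returns how far the output pointer advanced, i.e. bytes handled. */
        return vread(kbuf, vmalloc_addr, len);
}
#endif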