/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct *vmlist;
static void unmap_area_pte(pmd_t *pmd, unsigned long address,
			   unsigned long size)
	pte = pte_offset_kernel(pmd, address);
		page = ptep_get_and_clear(pte);
		if (pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (address < end);
static void unmap_area_pmd(pgd_t *dir, unsigned long address,
			   unsigned long size)
	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
		unmap_area_pte(pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
	} while (address < end);
static int map_area_pte(pte_t *pte, unsigned long address,
			unsigned long size, pgprot_t prot,
			struct page ***pages)
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		set_pte(pte, mk_pte(page, prot));
		address += PAGE_SIZE;
	} while (address < end);
static int map_area_pmd(pmd_t *pmd, unsigned long address,
			unsigned long size, pgprot_t prot,
			struct page ***pages)
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
		pte_t *pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (map_area_pte(pte, address, end - address, prot, pages))
		address = (address + PMD_SIZE) & PMD_MASK;
	} while (address < end);
void unmap_vm_area(struct vm_struct *area)
	unsigned long address = VMALLOC_VMADDR(area->addr);
	unsigned long end = (address + area->size);
	dir = pgd_offset_k(address);
		unmap_area_pmd(dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
	} while (address && (address < end));
	flush_tlb_kernel_range(VMALLOC_VMADDR(area->addr), end);
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
	unsigned long address = VMALLOC_VMADDR(area->addr);
	unsigned long end = address + (area->size - PAGE_SIZE);
	dir = pgd_offset_k(address);
	spin_lock(&init_mm.page_table_lock);
		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
		if (map_area_pmd(pmd, address, end - address, prot, pages)) {
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
/**
 * get_vm_area - reserve a contiguous kernel virtual area
 *
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
	struct vm_struct **p, *tmp, *area;
	unsigned long addr = VMALLOC_START;
	area = kmalloc(sizeof(*area), GFP_KERNEL);
	/* We always allocate a guard page. */
	if (unlikely(!size)) {
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p); p = &tmp->next) {
		if ((size + addr) < addr)
		if (size + addr <= (unsigned long)tmp->addr)
		addr = tmp->size + (unsigned long)tmp->addr;
		if (addr > VMALLOC_END - size)
	area->addr = (void *)addr;
	write_unlock(&vmlist_lock);
	write_unlock(&vmlist_lock);
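/*
 * Illustrative sketch (not part of the original file): reserving and then
 * releasing a chunk of kernel virtual address space without mapping anything
 * into it.  The function name example_reserve_area() is hypothetical; the
 * get_vm_area()/remove_vm_area() calls use the interfaces defined in this
 * file, and the kfree() matches the kmalloc() done by get_vm_area().
 */
static int example_reserve_area(void)
{
	struct vm_struct *area, *removed;

	/* Ask for one page of space; get_vm_area() adds a guard page itself. */
	area = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!area)
		return -ENOMEM;

	printk(KERN_DEBUG "reserved %lu bytes at %p\n", area->size, area->addr);

	/* Unlink the area from vmlist again and free its descriptor. */
	removed = remove_vm_area(area->addr);
	kfree(removed);
	return 0;
}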
/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 *
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size field.
 */
struct vm_struct *remove_vm_area(void *addr)
	struct vm_struct **p, *tmp;
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p); p = &tmp->next) {
		if (tmp->addr == addr)
	write_unlock(&vmlist_lock);
	write_unlock(&vmlist_lock);
void __vunmap(void *addr, int deallocate_pages)
	struct vm_struct *area;
	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
	if (deallocate_pages) {
		for (i = 0; i < area->nr_pages; i++) {
			if (unlikely(!area->pages[i]))
			__free_page(area->pages[i]);
/**
 * vfree - release memory allocated by vmalloc()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc().
 *
 * May not be called in interrupt context.
 */
void vfree(void *addr)
	BUG_ON(in_interrupt());
/**
 * vunmap - release virtual mapping obtained by vmap()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * May not be called in interrupt context.
 */
void vunmap(void *addr)
	BUG_ON(in_interrupt());
/**
 * vmap - map an array of pages into virtually contiguous space
 *
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
	   unsigned long flags, pgprot_t prot)
	struct vm_struct *area;
	if (count > num_physpages)
		return NULL;
	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (map_vm_area(area, prot, &pages)) {
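/*
 * Illustrative sketch (not part of the original file): building a virtually
 * contiguous mapping from individually allocated pages with vmap() and later
 * tearing it down with vunmap().  example_vmap_two_pages() is a hypothetical
 * helper, and the VM_ALLOC flag is only one possible choice; vmap(), vunmap(),
 * alloc_page() and __free_page() are used as declared in this file.
 */
static void *example_vmap_two_pages(struct page **pages)
{
	void *vaddr;

	/* The two pages need not be physically contiguous. */
	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);
	if (!pages[0] || !pages[1])
		goto fail;

	/* Map both pages at consecutive kernel virtual addresses. */
	vaddr = vmap(pages, 2, VM_ALLOC, PAGE_KERNEL);
	if (!vaddr)
		goto fail;
	return vaddr;	/* later: vunmap(vaddr), then __free_page() each page */

fail:
	if (pages[0])
		__free_page(pages[0]);
	if (pages[1])
		__free_page(pages[1]);
	return NULL;
}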
/**
 * __vmalloc - allocate virtually contiguous memory
 *
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
	struct vm_struct *area;
	unsigned int nr_pages, array_size, i;
	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;
	area = get_vm_area(size, VM_ALLOC);
	nr_pages = size >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));
	area->nr_pages = nr_pages;
	area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
		remove_vm_area(area->addr);
	memset(area->pages, 0, array_size);
	for (i = 0; i < area->nr_pages; i++) {
		area->pages[i] = alloc_page(gfp_mask);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
	if (map_vm_area(area, prot, &pages))
/**
 * vmalloc - allocate virtually contiguous memory
 *
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over the page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 *
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
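/*
 * Illustrative sketch (not part of the original file): the usual
 * vmalloc()/vfree() pairing for a larger, virtually contiguous buffer.
 * example_big_buffer() is a hypothetical helper; vmalloc() and vfree()
 * are used exactly as documented above.
 */
static int example_big_buffer(void)
{
	void *buf;

	/* Physically scattered pages, but one contiguous kernel virtual range. */
	buf = vmalloc(64 * 1024);
	if (!buf)
		return -ENOMEM;

	memset(buf, 0, 64 * 1024);

	/* Must not be called from interrupt context (see vfree() above). */
	vfree(buf);
	return 0;
}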
long vread(char *buf, char *addr, unsigned long count)
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
		n = vaddr + tmp->size - PAGE_SIZE - addr;
	read_unlock(&vmlist_lock);
	return buf - buf_start;
long vwrite(char *buf, char *addr, unsigned long count)
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
		n = vaddr + tmp->size - PAGE_SIZE - addr;
	read_unlock(&vmlist_lock);
	return buf - buf_start;