/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/* vmlist is kept sorted by address under vmlist_lock */
rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct *vmlist;

/*
 * Unmap the ptes covering [address, address + size) within a single
 * pmd.  Kernel pages are never swapped out, so finding a swap entry
 * here indicates page table corruption.
 */
static void unmap_area_pte(pmd_t *pmd, unsigned long address,
                                  unsigned long size)
{
        unsigned long end;
        pte_t *pte;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }

        pte = pte_offset_kernel(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;

        do {
                pte_t page;
                page = ptep_get_and_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page))
                        continue;
                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
        } while (address < end);
}
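
/*
 * The masking above turns address into an offset inside one pmd-sized
 * block: ~PMD_MASK is PMD_SIZE - 1, and end is clamped to PMD_SIZE so
 * the loop never walks past this pmd.  Worked example on i386 without
 * PAE (PMD_SIZE = 4MB): unmapping 3 pages at 0xf8bff000 yields
 * address = 0x3ff000 and end = 0x402000, clamped to 0x400000, so only
 * one pte is cleared here and unmap_area_pmd() carries the remainder
 * into the next pmd.
 */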

static void unmap_area_pmd(pgd_t *dir, unsigned long address,
                                  unsigned long size)
{
        unsigned long end;
        pmd_t *pmd;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }

        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        do {
                unmap_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}

static int map_area_pte(pte_t *pte, unsigned long address,
                               unsigned long size, pgprot_t prot,
                               struct page ***pages)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;

        do {
                struct page *page = **pages;

                WARN_ON(!pte_none(*pte));
                if (!page)
                        return -ENOMEM;

                set_pte(pte, mk_pte(page, prot));
                address += PAGE_SIZE;
                pte++;
                (*pages)++;
        } while (address < end);
        return 0;
}

static int map_area_pmd(pmd_t *pmd, unsigned long address,
                               unsigned long size, pgprot_t prot,
                               struct page ***pages)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        do {
                pte_t *pte = pte_alloc_kernel(&init_mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                if (map_area_pte(pte, address, end - address, prot, pages))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);

        return 0;
}

void unmap_vm_area(struct vm_struct *area)
{
        unsigned long address = (unsigned long) area->addr;
        unsigned long end = (address + area->size);
        pgd_t *dir;

        dir = pgd_offset_k(address);
        flush_cache_vunmap(address, end);
        do {
                unmap_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_kernel_range((unsigned long) area->addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
        unsigned long address = (unsigned long) area->addr;
        /* area->size includes the guard page; don't map it */
        unsigned long end = address + (area->size - PAGE_SIZE);
        pgd_t *dir;
        int err = 0;

        dir = pgd_offset_k(address);
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
                if (!pmd) {
                        err = -ENOMEM;
                        break;
                }
                if (map_area_pmd(pmd, address, end - address, prot, pages)) {
                        err = -ENOMEM;
                        break;
                }

                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));

        spin_unlock(&init_mm.page_table_lock);
        flush_cache_vmap((unsigned long) area->addr, end);
        return err;
}
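
/*
 * Note on the pages cursor: map_vm_area() takes a struct page *** and
 * the pte level advances it one slot per mapped page, so on return the
 * caller's pointer has walked across the array in order.  A minimal
 * pairing sketch ("pages" and "nr" assumed to exist; vmap() below is
 * the real in-tree caller):
 *
 *      struct vm_struct *vm = get_vm_area(nr << PAGE_SHIFT, VM_ALLOC);
 *      struct page **cursor = pages;
 *      if (vm && map_vm_area(vm, PAGE_KERNEL, &cursor))
 *              vunmap(vm->addr);       [tear down on failure]
 */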

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                unsigned long start, unsigned long end)
{
        struct vm_struct **p, *tmp, *area;
        unsigned long addr = start;

        area = kmalloc(sizeof(*area), GFP_KERNEL);
        if (unlikely(!area))
                return NULL;

        /*
         * We always allocate a guard page.
         */
        size += PAGE_SIZE;
        if (unlikely(!size)) {
                kfree(area);
                return NULL;
        }

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p); p = &tmp->next) {
                if ((unsigned long)tmp->addr < addr)
                        continue;
                if ((size + addr) < addr)
                        goto out;
                if (size + addr <= (unsigned long)tmp->addr)
                        goto found;
                addr = tmp->size + (unsigned long)tmp->addr;
                if (addr > end - size)
                        goto out;
        }

found:
        area->next = *p;
        *p = area;

        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size;
        area->pages = NULL;
        area->nr_pages = 0;
        area->phys_addr = 0;
        write_unlock(&vmlist_lock);

        return area;

out:
        write_unlock(&vmlist_lock);
        kfree(area);
        return NULL;
}
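
/*
 * Guard page arithmetic, worked through (assuming 4K pages): for
 * vmalloc(8192) the request stays 8192 after PAGE_ALIGN(), then
 * __get_vm_area() adds one guard page so area->size becomes 12288.
 * Only the first 8192 bytes are ever backed -- map_vm_area() stops at
 * area->size - PAGE_SIZE -- so running off the end of the allocation
 * faults instead of silently corrupting the next area.
 */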

/**
 *      get_vm_area  -  reserve a contiguous kernel virtual area
 *
 *      @size:          size of the area
 *      @flags:         %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *      Search an area of @size in the kernel virtual mapping area,
 *      and reserve it for our purposes.  Returns the area descriptor
 *      on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
        return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

/**
 *      remove_vm_area  -  find and remove a contiguous kernel virtual area
 *
 *      @addr:          base address
 *
 *      Search for the kernel VM area starting at @addr, and remove it.
 *      This function returns the found VM area, but using it is NOT safe
 *      on SMP machines.
 */
struct vm_struct *remove_vm_area(void *addr)
{
        struct vm_struct **p, *tmp;

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p); p = &tmp->next) {
                if (tmp->addr == addr)
                        goto found;
        }
        write_unlock(&vmlist_lock);
        return NULL;

found:
        unmap_vm_area(tmp);
        *p = tmp->next;
        write_unlock(&vmlist_lock);
        return tmp;
}
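
/*
 * Sketch of the reserve/remove pairing for code that establishes its
 * own mappings rather than going through vmap()/vmalloc() (the arch
 * ioremap()/iounmap() implementations are the in-tree users):
 *
 *      struct vm_struct *vm = get_vm_area(size, VM_IOREMAP);
 *      ... map something at vm->addr ...
 *      vm = remove_vm_area(vm->addr);  [also unmaps the range]
 *      kfree(vm);                      [caller owns the descriptor]
 */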

void __vunmap(void *addr, int deallocate_pages)
{
        struct vm_struct *area;

        if (!addr)
                return;

        if ((PAGE_SIZE-1) & (unsigned long)addr) {
                printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                return;
        }

        area = remove_vm_area(addr);
        if (unlikely(!area)) {
                printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
                                addr);
                return;
        }

        if (deallocate_pages) {
                int i;

                for (i = 0; i < area->nr_pages; i++) {
                        if (unlikely(!area->pages[i]))
                                BUG();
                        __free_page(area->pages[i]);
                }

                kfree(area->pages);
        }

        kfree(area);
        return;
}

/**
 *      vfree  -  release memory allocated by vmalloc()
 *
 *      @addr:          memory base address
 *
 *      Free the virtually contiguous memory area starting at @addr, as
 *      obtained from vmalloc(), vmalloc_32() or __vmalloc().
 *
 *      May not be called in interrupt context.
 */
void vfree(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 1);
}

EXPORT_SYMBOL(vfree);

/**
 *      vunmap  -  release virtual mapping obtained by vmap()
 *
 *      @addr:          memory base address
 *
 *      Free the virtually contiguous memory area starting at @addr,
 *      which was created from the page array passed to vmap().
 *
 *      May not be called in interrupt context.
 */
void vunmap(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 0);
}

EXPORT_SYMBOL(vunmap);

/**
 *      vmap  -  map an array of pages into virtually contiguous space
 *
 *      @pages:         array of page pointers
 *      @count:         number of pages to map
 *      @flags:         vm_area->flags
 *      @prot:          page protection for the mapping
 *
 *      Maps @count pages from @pages into contiguous kernel virtual
 *      space.
 */
void *vmap(struct page **pages, unsigned int count,
                unsigned long flags, pgprot_t prot)
{
        struct vm_struct *area;

        if (count > num_physpages)
                return NULL;

        area = get_vm_area((count << PAGE_SHIFT), flags);
        if (!area)
                return NULL;
        if (map_vm_area(area, prot, &pages)) {
                vunmap(area->addr);
                return NULL;
        }

        return area->addr;
}

EXPORT_SYMBOL(vmap);
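
/*
 * Example vmap() round trip (a sketch, not code from this file; the
 * page count, flags and GFP mask are arbitrary):
 *
 *      struct page *pages[4];
 *      void *addr;
 *      int i;
 *
 *      for (i = 0; i < 4; i++)
 *              pages[i] = alloc_page(GFP_KERNEL);      [check each!]
 *      addr = vmap(pages, 4, VM_ALLOC, PAGE_KERNEL);
 *      ... addr now spans four physically scattered pages ...
 *      vunmap(addr);
 *      for (i = 0; i < 4; i++)
 *              __free_page(pages[i]);
 *
 * vunmap() only drops the virtual mapping: the caller still owns the
 * pages and must free them itself, unlike vfree().
 */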

/**
 *      __vmalloc  -  allocate virtually contiguous memory
 *
 *      @size:          allocation size
 *      @gfp_mask:      flags for the page level allocator
 *      @prot:          protection mask for the allocated pages
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator with @gfp_mask flags.  Map them into contiguous
 *      kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
{
        struct vm_struct *area;
        struct page **pages;
        unsigned int nr_pages, array_size, i;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;

        area = get_vm_area(size, VM_ALLOC);
        if (!area)
                return NULL;

        nr_pages = size >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));

        area->nr_pages = nr_pages;
        /* the page array itself must not come from highmem */
        area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
        if (!area->pages) {
                remove_vm_area(area->addr);
                kfree(area);
                return NULL;
        }
        memset(area->pages, 0, array_size);

        for (i = 0; i < area->nr_pages; i++) {
                area->pages[i] = alloc_page(gfp_mask);
                if (unlikely(!area->pages[i])) {
                        /* Successfully allocated i pages, free them in __vunmap() */
                        area->nr_pages = i;
                        goto fail;
                }
        }

        if (map_vm_area(area, prot, &pages))
                goto fail;
        return area->addr;

fail:
        vfree(area->addr);
        return NULL;
}

EXPORT_SYMBOL(__vmalloc);
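
/*
 * vmalloc() below is simply __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM,
 * PAGE_KERNEL); callers use __vmalloc() directly when they need other
 * GFP flags or page protections.  A sketch (PAGE_KERNEL_NOCACHE is an
 * arch-specific pgprot, shown purely as an illustration):
 *
 *      buf = __vmalloc(len, GFP_KERNEL, PAGE_KERNEL_NOCACHE);
 */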

/**
 *      vmalloc  -  allocate virtually contiguous memory
 *
 *      @size:          allocation size
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator and map them into contiguous kernel virtual space.
 *
 *      For tight control over page level allocator and protection flags
 *      use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc);
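
/*
 * Typical use (a sketch; "nents" and "struct entry" are made up):
 *
 *      struct entry *tbl = vmalloc(nents * sizeof(*tbl));
 *      if (!tbl)
 *              return -ENOMEM;
 *      ...
 *      vfree(tbl);
 *
 * The result is virtually contiguous but physically scattered, so it
 * is unsuitable wherever physical contiguity is required (e.g. device
 * DMA); kmalloc() or __get_free_pages() cover that case.
 */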

/**
 *      vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *
 *      @size:          allocation size
 *
 *      Allocate enough 32bit PA addressable pages to cover @size from the
 *      page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc_32);

/* Read out of the vmalloc range; unmapped gaps read back as '\0'. */
long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}

/* Write into the vmalloc range; bytes aimed at unmapped gaps are dropped. */
long vwrite(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *addr = *buf;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}
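
/*
 * vread()/vwrite() back the /dev/kmem interface to the vmalloc range:
 * unmapped gaps between areas read back as zeroes and swallow writes,
 * and each area's trailing guard page is skipped.  A kernel-internal
 * sketch ("dst" and "n" are made up for illustration):
 *
 *      long copied = vread(dst, area_addr, n);
 *      [copied may stop short of n past the last vm area]
 */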