/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>

rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct *vmlist;

static void unmap_area_pte(pmd_t *pmd, unsigned long address,
                                  unsigned long size)
{
        unsigned long end;
        pte_t *pte;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }

        pte = pte_offset_kernel(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;

        do {
                pte_t page;
                page = ptep_get_and_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page))
                        continue;
                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
        } while (address < end);
}

static void unmap_area_pmd(pgd_t *dir, unsigned long address,
                                  unsigned long size)
{
        unsigned long end;
        pmd_t *pmd;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }

        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        do {
                unmap_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}

static int map_area_pte(pte_t *pte, unsigned long address,
                               unsigned long size, pgprot_t prot,
                               struct page ***pages)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;

        do {
                struct page *page = **pages;

                WARN_ON(!pte_none(*pte));
                if (!page)
                        return -ENOMEM;

                set_pte(pte, mk_pte(page, prot));
                address += PAGE_SIZE;
                pte++;
                (*pages)++;
        } while (address < end);
        return 0;
}

static int map_area_pmd(pmd_t *pmd, unsigned long address,
                               unsigned long size, pgprot_t prot,
                               struct page ***pages)
{
        unsigned long base, end;

        base = address & PGDIR_MASK;
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        do {
                pte_t * pte = pte_alloc_kernel(&init_mm, pmd, base + address);
                if (!pte)
                        return -ENOMEM;
                if (map_area_pte(pte, address, end - address, prot, pages))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);

        return 0;
}

void unmap_vm_area(struct vm_struct *area)
{
        unsigned long address = (unsigned long) area->addr;
        unsigned long end = (address + area->size);
        pgd_t *dir;

        dir = pgd_offset_k(address);
        flush_cache_vunmap(address, end);
        do {
                unmap_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_kernel_range((unsigned long) area->addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
        unsigned long address = (unsigned long) area->addr;
        unsigned long end = address + (area->size-PAGE_SIZE);
        pgd_t *dir;
        int err = 0;

        dir = pgd_offset_k(address);
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
                if (!pmd) {
                        err = -ENOMEM;
                        break;
                }
                if (map_area_pmd(pmd, address, end - address, prot, pages)) {
                        err = -ENOMEM;
                        break;
                }

                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));

        spin_unlock(&init_mm.page_table_lock);
        flush_cache_vmap((unsigned long) area->addr, end);
        return err;
}

#define IOREMAP_MAX_ORDER       (7 + PAGE_SHIFT)        /* 128 pages */

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                unsigned long start, unsigned long end)
{
        struct vm_struct **p, *tmp, *area;
        unsigned long align = 1;
        unsigned long addr;

        if (flags & VM_IOREMAP) {
                int bit = fls(size);

                if (bit > IOREMAP_MAX_ORDER)
                        bit = IOREMAP_MAX_ORDER;
                else if (bit < PAGE_SHIFT)
                        bit = PAGE_SHIFT;

                align = 1ul << bit;
        }
        addr = ALIGN(start, align);

        area = kmalloc(sizeof(*area), GFP_KERNEL);
        if (unlikely(!area))
                return NULL;

        /*
         * We always allocate a guard page.
         */
        size += PAGE_SIZE;
        if (unlikely(!size)) {
                kfree (area);
                return NULL;
        }

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
                if ((unsigned long)tmp->addr < addr) {
                        if((unsigned long)tmp->addr + tmp->size >= addr)
                                addr = ALIGN(tmp->size +
                                             (unsigned long)tmp->addr, align);
                        continue;
                }
                if ((size + addr) < addr)
                        goto out;
                if (size + addr <= (unsigned long)tmp->addr)
                        goto found;
                addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
                if (addr > end - size)
                        goto out;
        }

found:
        area->next = *p;
        *p = area;

        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size;
        area->pages = NULL;
        area->nr_pages = 0;
        area->phys_addr = 0;
        write_unlock(&vmlist_lock);

        return area;

out:
        write_unlock(&vmlist_lock);
        kfree(area);
        if (printk_ratelimit())
                printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
        return NULL;
}

/**
 * get_vm_area  -  reserve a contiguous kernel virtual area
 * @size:	size of the area
 * @flags:	%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
        return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

/**
 * remove_vm_area  -  find and remove a contiguous kernel virtual area
 * @addr:	base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines.
 */
struct vm_struct *remove_vm_area(void *addr)
{
        struct vm_struct **p, *tmp;

        write_lock(&vmlist_lock);
        for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
                if (tmp->addr == addr)
                        goto found;
        }
        write_unlock(&vmlist_lock);
        return NULL;

found:
        unmap_vm_area(tmp);
        *p = tmp->next;
        write_unlock(&vmlist_lock);
        return tmp;
}

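/*
 * Illustrative sketch, not part of the original file: how a caller could
 * pair get_vm_area() with remove_vm_area() to reserve a stretch of kernel
 * virtual address space and hand it back without ever populating it.  The
 * helper name is hypothetical.
 */
static inline void get_vm_area_example(unsigned long size)
{
        struct vm_struct *area = get_vm_area(size, VM_IOREMAP);

        if (!area)
                return;                 /* no hole big enough in the vmlist */

        /* area->addr covers @size rounded up, plus one trailing guard page */

        kfree(remove_vm_area(area->addr));      /* unlink and free the descriptor */
}
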
void __vunmap(void *addr, int deallocate_pages)
{
        struct vm_struct *area;

        if (!addr)
                return;

        if ((PAGE_SIZE-1) & (unsigned long)addr) {
                printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                WARN_ON(1);
                return;
        }

        area = remove_vm_area(addr);
        if (unlikely(!area)) {
                printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
                                addr);
                WARN_ON(1);
                return;
        }

        if (deallocate_pages) {
                int i;

                for (i = 0; i < area->nr_pages; i++) {
                        if (unlikely(!area->pages[i]))
                                BUG();
                        __free_page(area->pages[i]);
                }

                kfree(area->pages);
        }

        kfree(area);
        return;
}

/**
 * vfree  -  release memory allocated by vmalloc()
 * @addr:	memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc().
 *
 * May not be called in interrupt context.
 */
void vfree(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 1);
}

EXPORT_SYMBOL(vfree);

/**
 * vunmap  -  release virtual mapping obtained by vmap()
 * @addr:	memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * May not be called in interrupt context.
 */
void vunmap(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 0);
}

EXPORT_SYMBOL(vunmap);

/**
 * vmap  -  map an array of pages into virtually contiguous space
 * @pages:	array of page pointers
 * @count:	number of pages to map
 * @flags:	vm_area->flags
 * @prot:	page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
                unsigned long flags, pgprot_t prot)
{
        struct vm_struct *area;

        if (count > num_physpages)
                return NULL;

        area = get_vm_area((count << PAGE_SHIFT), flags);
        if (!area)
                return NULL;
        if (map_vm_area(area, prot, &pages)) {
                vunmap(area->addr);
                return NULL;
        }

        return area->addr;
}

EXPORT_SYMBOL(vmap);

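/*
 * Illustrative sketch, not part of the original file: mapping an existing
 * page array into one virtually contiguous window with vmap() and tearing
 * the window down again with vunmap().  The helper name is hypothetical;
 * the caller is assumed to own @pages.
 */
static inline int vmap_example(struct page **pages, unsigned int count)
{
        void *virt = vmap(pages, count, VM_MAP, PAGE_KERNEL);

        if (!virt)
                return -ENOMEM;         /* address space or page table allocation failed */

        /* ... access all @count pages linearly through virt ... */

        vunmap(virt);                   /* removes the mapping; the pages themselves survive */
        return 0;
}
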
/**
 * __vmalloc  -  allocate virtually contiguous memory
 * @size:	allocation size
 * @gfp_mask:	flags for the page level allocator
 * @prot:	protection mask for the allocated pages
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
{
        struct vm_struct *area;
        struct page **pages;
        unsigned int nr_pages, array_size, i;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;

        area = get_vm_area(size, VM_ALLOC);
        if (!area)
                return NULL;

        nr_pages = size >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));

        area->nr_pages = nr_pages;
        area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
        if (!area->pages) {
                remove_vm_area(area->addr);
                kfree(area);
                return NULL;
        }
        memset(area->pages, 0, array_size);

        for (i = 0; i < area->nr_pages; i++) {
                area->pages[i] = alloc_page(gfp_mask);
                if (unlikely(!area->pages[i])) {
                        /* Successfully allocated i pages, free them in __vunmap() */
                        area->nr_pages = i;
                        goto fail;
                }
        }

        if (map_vm_area(area, prot, &pages))
                goto fail;
        return area->addr;

fail:
        vfree(area->addr);
        return NULL;
}

EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc  -  allocate virtually contiguous memory
 * @size:	allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc);

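/*
 * Illustrative sketch, not part of the original file: the usual
 * vmalloc()/vfree() pairing for a buffer that only needs to be virtually
 * contiguous.  The helper name is hypothetical.
 */
static inline void *vmalloc_example(unsigned long nbytes)
{
        void *buf = vmalloc(nbytes);    /* may sleep; returns NULL on failure */

        if (!buf)
                return NULL;

        memset(buf, 0, nbytes);         /* every page is mapped, so plain stores work */
        return buf;                     /* the caller releases it with vfree(buf) */
}
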
/**
 * vmalloc_exec  -  allocate virtually contiguous, executable memory
 * @size:	allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

void *vmalloc_exec(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 * @size:	allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc_32);

long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *addr = *buf;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}
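
/*
 * Illustrative sketch, not part of the original file: copying bytes out of
 * the vmalloc range with vread(), in the style of the /dev/kmem driver.
 * Gaps between vm areas read back as zero bytes.  The helper name is
 * hypothetical.
 */
static inline long vread_example(void *vm_addr, char *out, unsigned long len)
{
        return vread(out, (char *)vm_addr, len);        /* takes vmlist_lock for reading */
}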