/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>


rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct *vmlist;

static void unmap_area_pte(pmd_t *pmd, unsigned long address,
                unit unsigned long size)
{
        unsigned long end;
        pte_t *pte;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }

        pte = pte_offset_kernel(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;

        do {
                pte_t page;
                page = ptep_get_and_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page))
                        continue;
                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
        } while (address < end);
}

static void unmap_area_pmd(pgd_t *dir, unsigned long address,
                unsigned long size)
{
        unsigned long end;
        pmd_t *pmd;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }

        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        do {
                unmap_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}

static int map_area_pte(pte_t *pte, unsigned long address,
                unsigned long size, pgprot_t prot,
                struct page ***pages)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;

        do {
                struct page *page = **pages;

                WARN_ON(!pte_none(*pte));
                if (!page)
                        return -ENOMEM;

                set_pte(pte, mk_pte(page, prot));
                address += PAGE_SIZE;
                pte++;
                (*pages)++;
        } while (address < end);
        return 0;
}

static int map_area_pmd(pmd_t *pmd, unsigned long address,
                unsigned long size, pgprot_t prot,
                struct page ***pages)
{
        unsigned long base, end;

        base = address & PGDIR_MASK;
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        do {
                pte_t * pte = pte_alloc_kernel(&init_mm, pmd, base + address);
                if (!pte)
                        return -ENOMEM;
                if (map_area_pte(pte, address, end - address, prot, pages))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);

        return 0;
}

void unmap_vm_area(struct vm_struct *area)
{
        unsigned long address = (unsigned long) area->addr;
        unsigned long end = (address + area->size);
        pgd_t *dir;

        dir = pgd_offset_k(address);
        flush_cache_vunmap(address, end);
        do {
                unmap_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_kernel_range((unsigned long) area->addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
        unsigned long address = (unsigned long) area->addr;
        unsigned long end = address + (area->size-PAGE_SIZE);
        pgd_t *dir;
        int err = 0;

        dir = pgd_offset_k(address);
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
                if (!pmd) {
                        err = -ENOMEM;
                        break;
                }
                if (map_area_pmd(pmd, address, end - address, prot, pages)) {
                        err = -ENOMEM;
                        break;
                }

                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));

        spin_unlock(&init_mm.page_table_lock);
        flush_cache_vmap((unsigned long) area->addr, end);
        return err;
}

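/*
 * Editor's sketch (not part of the original file): the size arithmetic
 * above relies on the guard page that __get_vm_area() (below) appends
 * to every area, so map_vm_area() maps only area->size - PAGE_SIZE
 * bytes and the trailing page stays unmapped; an overrun then faults
 * instead of silently scribbling on a neighbouring mapping.  Assuming
 * PAGE_SIZE == 4096:
 */
#if 0
static void example_guard_page_math(void)
{
        unsigned long request = 8192;                   /* caller asks for 8 KB */
        unsigned long size = request + PAGE_SIZE;       /* __get_vm_area() records 12 KB */
        unsigned long mapped = size - PAGE_SIZE;        /* map_vm_area() maps 8 KB */
}
#endif
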
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                unsigned long start, unsigned long end)
{
        struct vm_struct **p, *tmp, *area;
        unsigned long addr = start;

        area = kmalloc(sizeof(*area), GFP_KERNEL);
        if (unlikely(!area))
                return NULL;

        /*
         * We always allocate a guard page.
         */
        size += PAGE_SIZE;
        if (unlikely(!size)) {
                kfree (area);
                return NULL;
        }

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) ;p = &tmp->next) {
                if ((unsigned long)tmp->addr < addr)
                        continue;
                if ((size + addr) < addr)
                        goto out;
                if (size + addr <= (unsigned long)tmp->addr)
                        goto found;
                addr = tmp->size + (unsigned long)tmp->addr;
                if (addr > end - size)
                        goto out;
        }

found:
        area->next = *p;
        *p = area;

        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size;
        area->pages = NULL;
        area->nr_pages = 0;
        area->phys_addr = 0;
        write_unlock(&vmlist_lock);

        return area;

out:
        write_unlock(&vmlist_lock);
        kfree(area);
        return NULL;
}

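/*
 * Editor's note (hypothetical numbers): the loop above is a first-fit
 * scan of the address-sorted vmlist.  With existing areas at
 * [A, A + 16K) and [A + 64K, ...) and a 12 KB request (16 KB once the
 * guard page is added), addr advances past the first area to A + 16K,
 * where size + addr <= A + 64K holds, so the new area is linked into
 * the 48 KB hole at A + 16K.
 */
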
/**
 *      get_vm_area  -  reserve a contiguous kernel virtual area
 *
 *      @size:          size of the area
 *      @flags:         %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *      Search an area of @size in the kernel virtual mapping area,
 *      and reserve it for our purposes.  Returns the area descriptor
 *      on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
        return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

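/*
 * Editor's usage sketch (hypothetical helper, not in the original):
 * an ioremap()-style caller reserving kernel virtual space.  The arch
 * code would then fill the range with page table entries itself.
 */
#if 0
static void *example_reserve_io_range(unsigned long size)
{
        struct vm_struct *area = get_vm_area(size, VM_IOREMAP);

        if (!area)
                return NULL;
        /* ... install ptes covering [area->addr, area->addr + size) ... */
        return area->addr;
}
#endif
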
/**
 *      remove_vm_area  -  find and remove a contiguous kernel virtual area
 *
 *      @addr:          base address
 *
 *      Search for the kernel VM area starting at @addr, and remove it.
 *      This function returns the found VM area, but using it is NOT safe
 *      on SMP machines.
 */
struct vm_struct *remove_vm_area(void *addr)
{
        struct vm_struct **p, *tmp;

        write_lock(&vmlist_lock);
        for (p = &vmlist ; (tmp = *p) ;p = &tmp->next) {
                if (tmp->addr == addr)
                        goto found;
        }
        write_unlock(&vmlist_lock);
        return NULL;

found:
        unmap_vm_area(tmp);
        *p = tmp->next;
        write_unlock(&vmlist_lock);
        return tmp;
}

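/*
 * Editor's sketch (hypothetical helper): undoing a reservation.
 * remove_vm_area() unmaps the range and unlinks the descriptor, but
 * ownership of the descriptor passes back to the caller, who must
 * kfree() it (exactly as __vunmap() below does).
 */
#if 0
static void example_release_range(void *addr)
{
        struct vm_struct *area = remove_vm_area(addr);

        if (area)
                kfree(area);
}
#endif
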
void __vunmap(void *addr, int deallocate_pages)
{
        struct vm_struct *area;

        if (!addr)
                return;

        if ((PAGE_SIZE-1) & (unsigned long)addr) {
                printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                WARN_ON(1);
                return;
        }

        area = remove_vm_area(addr);
        if (unlikely(!area)) {
                printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
                                addr);
                WARN_ON(1);
                return;
        }

        if (deallocate_pages) {
                int i;

                for (i = 0; i < area->nr_pages; i++) {
                        if (unlikely(!area->pages[i]))
                                BUG();
                        __free_page(area->pages[i]);
                }

                kfree(area->pages);
        }

        kfree(area);
        return;
}

/**
 *      vfree  -  release memory allocated by vmalloc()
 *
 *      @addr:          memory base address
 *
 *      Free the virtually contiguous memory area starting at @addr, as
 *      obtained from vmalloc(), vmalloc_32() or __vmalloc().
 *
 *      May not be called in interrupt context.
 */
void vfree(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 1);
}

EXPORT_SYMBOL(vfree);

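/*
 * Editor's sketch (hypothetical, assuming <linux/workqueue.h> and the
 * 2.6-era three-argument INIT_WORK()): since vfree() BUGs in interrupt
 * context, code that may run from an interrupt handler defers the free
 * to process context, e.g. via the shared work queue.
 */
#if 0
static struct work_struct example_free_work;

static void example_do_vfree(void *data)
{
        vfree(data);                    /* keventd runs in process context */
}

static void example_vfree_from_irq(void *buf)
{
        INIT_WORK(&example_free_work, example_do_vfree, buf);
        schedule_work(&example_free_work);
}
#endif
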
/**
 *      vunmap  -  release virtual mapping obtained by vmap()
 *
 *      @addr:          memory base address
 *
 *      Free the virtually contiguous memory area starting at @addr,
 *      which was created from the page array passed to vmap().
 *
 *      May not be called in interrupt context.
 */
void vunmap(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 0);
}

EXPORT_SYMBOL(vunmap);

/**
 *      vmap  -  map an array of pages into virtually contiguous space
 *
 *      @pages:         array of page pointers
 *      @count:         number of pages to map
 *      @flags:         vm_area->flags
 *      @prot:          page protection for the mapping
 *
 *      Maps @count pages from @pages into contiguous kernel virtual
 *      space.
 */
void *vmap(struct page **pages, unsigned int count,
                unsigned long flags, pgprot_t prot)
{
        struct vm_struct *area;

        if (count > num_physpages)
                return NULL;

        area = get_vm_area((count << PAGE_SHIFT), flags);
        if (!area)
                return NULL;
        if (map_vm_area(area, prot, &pages)) {
                vunmap(area->addr);
                return NULL;
        }

        return area->addr;
}

EXPORT_SYMBOL(vmap);

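/*
 * Editor's usage sketch (hypothetical helper): stitching two
 * already-allocated pages into one virtually contiguous window with
 * vmap(), torn down later with vunmap().  The pages themselves stay
 * allocated and must be freed separately.  The flag choice is an
 * assumption; this file itself only uses VM_ALLOC and VM_IOREMAP.
 */
#if 0
static void *example_vmap_pair(struct page *pg0, struct page *pg1)
{
        struct page *pages[2] = { pg0, pg1 };

        return vmap(pages, 2, VM_ALLOC, PAGE_KERNEL);
}
#endif
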
/**
 *      __vmalloc  -  allocate virtually contiguous memory
 *
 *      @size:          allocation size
 *      @gfp_mask:      flags for the page level allocator
 *      @prot:          protection mask for the allocated pages
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator with @gfp_mask flags.  Map them into contiguous
 *      kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
{
        struct vm_struct *area;
        struct page **pages;
        unsigned int nr_pages, array_size, i;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;

        area = get_vm_area(size, VM_ALLOC);
        if (!area)
                return NULL;

        nr_pages = size >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));

        area->nr_pages = nr_pages;
        area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
        if (!area->pages) {
                remove_vm_area(area->addr);
                kfree(area);
                return NULL;
        }
        memset(area->pages, 0, array_size);

        for (i = 0; i < area->nr_pages; i++) {
                area->pages[i] = alloc_page(gfp_mask);
                if (unlikely(!area->pages[i])) {
                        /* Successfully allocated i pages, free them in __vunmap() */
                        area->nr_pages = i;
                        goto fail;
                }
        }

        if (map_vm_area(area, prot, &pages))
                goto fail;
        return area->addr;

fail:
        vfree(area->addr);
        return NULL;
}

EXPORT_SYMBOL(__vmalloc);

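/*
 * Editor's sketch (hypothetical helper): __vmalloc() exists for callers
 * that need a non-default gfp mask or protection, e.g. a filesystem
 * path that must not recurse into the FS during reclaim.  The plain
 * vmalloc() wrapper just below is simply the GFP_KERNEL | __GFP_HIGHMEM,
 * PAGE_KERNEL case.
 */
#if 0
static void *example_vmalloc_nofs(unsigned long size)
{
        return __vmalloc(size, GFP_NOFS | __GFP_HIGHMEM, PAGE_KERNEL);
}
#endif
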
/**
 *      vmalloc  -  allocate virtually contiguous memory
 *
 *      @size:          allocation size
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator and map them into contiguous kernel virtual space.
 *
 *      For tight control over page level allocator and protection flags
 *      use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc);

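/*
 * Editor's usage sketch (hypothetical helper): the classic
 * vmalloc()/vfree() pairing for a buffer too large to kmalloc() but
 * which need not be physically contiguous.
 */
#if 0
static int example_big_buffer(void)
{
        char *buf = vmalloc(128 * 1024);

        if (!buf)
                return -ENOMEM;
        memset(buf, 0, 128 * 1024);     /* virtually contiguous: plain
                                         * pointer arithmetic works */
        vfree(buf);                     /* not from interrupt context */
        return 0;
}
#endif
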
/**
 *      vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *
 *      @size:          allocation size
 *
 *      Allocate enough 32-bit physically addressable pages to cover @size
 *      from the page level allocator and map them into contiguous kernel
 *      virtual space.
 */
void *vmalloc_32(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc_32);

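/*
 * Editor's sketch (hypothetical helper): vmalloc_32() just omits
 * __GFP_HIGHMEM from the gfp mask, so every backing page has a 32-bit
 * physical address; useful for drivers that hand per-page addresses to
 * hardware limited to 32-bit physical addressing.
 */
#if 0
static void *example_low_pages(unsigned long size)
{
        return vmalloc_32(size);        /* each backing page is 32-bit addressable */
}
#endif
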
long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}

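/*
 * Editor's note (hypothetical sketch): vread() zero-fills the gaps
 * before and between vm areas rather than touching unmapped addresses,
 * which is what lets /dev/kmem-style readers scan the vmalloc range
 * safely.  The return value counts every byte placed in the buffer,
 * copied and zero-filled alike.
 */
#if 0
static long example_scan(char *kbuf, char *vm_addr, unsigned long len)
{
        return vread(kbuf, vm_addr, len);       /* bytes stored into kbuf */
}
#endif
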
long vwrite(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *addr = *buf;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}