/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct *vmlist;

static void unmap_area_pte(pmd_t *pmd, unsigned long address,
                           unsigned long size)
{
        unsigned long end;
        pte_t *pte;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }

        pte = pte_offset_kernel(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;

        do {
                pte_t page;
                page = ptep_get_and_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page))
                        continue;
                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
        } while (address < end);
}

static void unmap_area_pmd(pgd_t *dir, unsigned long address,
                           unsigned long size)
{
        unsigned long end;
        pmd_t *pmd;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }

        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        do {
                unmap_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}

static int map_area_pte(pte_t *pte, unsigned long address,
                        unsigned long size, pgprot_t prot,
                        struct page ***pages)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;

        do {
                struct page *page = **pages;

                WARN_ON(!pte_none(*pte));
                if (!page)
                        return -ENOMEM;

                set_pte(pte, mk_pte(page, prot));
                address += PAGE_SIZE;
                pte++;
                (*pages)++;
        } while (address < end);
        return 0;
}

static int map_area_pmd(pmd_t *pmd, unsigned long address,
                        unsigned long size, pgprot_t prot,
                        struct page ***pages)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        do {
                pte_t *pte = pte_alloc_kernel(&init_mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                if (map_area_pte(pte, address, end - address, prot, pages))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);

        return 0;
}

void unmap_vm_area(struct vm_struct *area)
{
        unsigned long address = VMALLOC_VMADDR(area->addr);
        unsigned long end = (address + area->size);
        pgd_t *dir;

        dir = pgd_offset_k(address);
        flush_cache_all();
        do {
                unmap_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_kernel_range(VMALLOC_VMADDR(area->addr), end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
        unsigned long address = VMALLOC_VMADDR(area->addr);
        unsigned long end = address + (area->size - PAGE_SIZE);
        pgd_t *dir;
        int err = 0;

        dir = pgd_offset_k(address);
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
                if (!pmd) {
                        err = -ENOMEM;
                        break;
                }
                if (map_area_pmd(pmd, address, end - address, prot, pages)) {
                        err = -ENOMEM;
                        break;
                }

                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));

        spin_unlock(&init_mm.page_table_lock);
        flush_cache_all();
        return err;
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search for an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
        struct vm_struct **p, *tmp, *area;
        unsigned long addr = VMALLOC_START;

        area = kmalloc(sizeof(*area), GFP_KERNEL);
        if (unlikely(!area))
                return NULL;

        /*
         * We always allocate a guard page.
         */
        size += PAGE_SIZE;
        if (unlikely(!size)) {
                kfree(area);
                return NULL;
        }

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p); p = &tmp->next) {
                if ((size + addr) < addr)
                        goto out;
                if (size + addr <= (unsigned long)tmp->addr)
                        goto found;
                addr = tmp->size + (unsigned long)tmp->addr;
                if (addr > VMALLOC_END - size)
                        goto out;
        }

found:
        area->next = *p;
        *p = area;

        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size;
        area->pages = NULL;
        area->nr_pages = 0;
        area->phys_addr = 0;
        write_unlock(&vmlist_lock);

        return area;

out:
        write_unlock(&vmlist_lock);
        kfree(area);
        return NULL;
}
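
/*
 * A get_vm_area() reservation is normally followed either by map_vm_area()
 * (as vmap() below does) or by an architecture's remapping code for
 * VM_IOREMAP users.  A rough sketch, assuming the caller already holds an
 * array "pages" of "count" struct page pointers (both names illustrative):
 *
 *	struct vm_struct *area;
 *
 *	area = get_vm_area(count << PAGE_SHIFT, VM_ALLOC);
 *	if (!area)
 *		return NULL;
 *	if (map_vm_area(area, PAGE_KERNEL, &pages)) {
 *		vunmap(area->addr);
 *		return NULL;
 *	}
 *	return area->addr;
 */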

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines.
 */
struct vm_struct *remove_vm_area(void *addr)
{
        struct vm_struct **p, *tmp;

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p); p = &tmp->next) {
                if (tmp->addr == addr)
                        goto found;
        }
        write_unlock(&vmlist_lock);
        return NULL;

found:
        unmap_vm_area(tmp);
        *p = tmp->next;
        write_unlock(&vmlist_lock);
        return tmp;
}
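
/*
 * Teardown mirrors __vunmap() below: remove_vm_area() unlinks the area
 * from vmlist and unmaps its page tables, after which the caller owns the
 * descriptor and must kfree() it (and release any pages it had allocated
 * itself).  A minimal sketch, assuming "addr" came from get_vm_area():
 *
 *	struct vm_struct *area = remove_vm_area(addr);
 *
 *	if (area)
 *		kfree(area);
 */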

void __vunmap(void *addr, int deallocate_pages)
{
        struct vm_struct *area;

        if (!addr)
                return;

        if ((PAGE_SIZE - 1) & (unsigned long)addr) {
                printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                return;
        }

        area = remove_vm_area(addr);
        if (unlikely(!area)) {
                printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
                                addr);
                return;
        }

        if (deallocate_pages) {
                int i;

                for (i = 0; i < area->nr_pages; i++) {
                        if (unlikely(!area->pages[i]))
                                BUG();
                        __free_page(area->pages[i]);
                }

                kfree(area->pages);
        }

        kfree(area);
        return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc().
 *
 *	May not be called in interrupt context.
 */
void vfree(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 1);
}

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	May not be called in interrupt context.
 */
void vunmap(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 0);
}

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot)
{
        struct vm_struct *area;

        if (count > num_physpages)
                return NULL;

        area = get_vm_area((count << PAGE_SHIFT), flags);
        if (!area)
                return NULL;
        if (map_vm_area(area, prot, &pages)) {
                vunmap(area->addr);
                return NULL;
        }

        return area->addr;
}
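
/*
 * Example vmap()/vunmap() usage; "my_pages" and "nr" stand for a page
 * array the caller already owns, and the flags value is simply recorded
 * in the area descriptor by get_vm_area():
 *
 *	void *va = vmap(my_pages, nr, VM_ALLOC, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	memset(va, 0, nr << PAGE_SHIFT);
 *	vunmap(va);
 */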

/**
 *	__vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
{
        struct vm_struct *area;
        struct page **pages;
        unsigned int nr_pages, array_size, i;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;

        area = get_vm_area(size, VM_ALLOC);
        if (!area)
                return NULL;

        nr_pages = size >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));

        area->nr_pages = nr_pages;
        area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
        if (!area->pages) {
                remove_vm_area(area->addr);
                kfree(area);
                return NULL;
        }
        memset(area->pages, 0, array_size);

        for (i = 0; i < area->nr_pages; i++) {
                area->pages[i] = alloc_page(gfp_mask);
                if (unlikely(!area->pages[i])) {
                        /* Successfully allocated i pages, free them in __vunmap() */
                        area->nr_pages = i;
                        goto fail;
                }
        }

        if (map_vm_area(area, prot, &pages))
                goto fail;
        return area->addr;

fail:
        vfree(area->addr);
        return NULL;
}
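
/*
 * __vmalloc() is the common backend; the wrappers below fix gfp_mask and
 * protection for the usual cases.  Calling it directly only makes sense
 * when those defaults are not wanted, e.g. an allocation that must avoid
 * highmem pages boils down to exactly what vmalloc_32() passes:
 *
 *	void *p = __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
 */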

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over the page level allocator and protection
 *	flags use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
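
/*
 * Typical vmalloc()/vfree() pairing for a buffer too large to obtain as
 * physically contiguous memory from kmalloc() (the size here is only
 * illustrative):
 *
 *	void *buf = vmalloc(128 * 1024);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	memset(buf, 0, 128 * 1024);
 *	vfree(buf);
 */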

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
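
/*
 * vmalloc_32() suits callers whose backing pages must stay 32-bit
 * addressable, for example a buffer later handed page by page to a
 * device that cannot reach highmem.  Usage is otherwise identical to
 * vmalloc() (the size is illustrative):
 *
 *	void *fb = vmalloc_32(1024 * 768 * 2);
 *
 *	if (!fb)
 *		return -ENOMEM;
 *	memset(fb, 0, 1024 * 768 * 2);
 *	vfree(fb);
 */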

/*
 * vread() copies a range of the vmalloc()ed address space into a kernel
 * buffer; holes between mapped areas read back as '\0' bytes.
 */
long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}

/*
 * vwrite() is the write-side counterpart of vread(); bytes that fall into
 * holes between mapped areas are silently skipped.
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *addr = *buf;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}