/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 */

#include <linux/malloc.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct * vmlist;
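
/*
 * Walk one PTE page and release every page mapped in [address,
 * address + size) within this pmd: each PTE is cleared, and the
 * underlying physical page is freed unless it is reserved or
 * invalid.  A swapped-out entry should never appear in a kernel
 * page table, hence the KERN_CRIT complaint.
 */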
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t page;
                page = ptep_get_and_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page)) {
                        struct page *ptpage = pte_page(page);
                        if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
                                __free_page(ptpage);
                        continue;
                }
                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
        } while (address < end);
}
static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
{
        pmd_t * pmd;
        unsigned long end;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                free_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}
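
/*
 * Unmap and free the pages backing the kernel virtual range
 * [address, address + size).  Caches are flushed before the page
 * tables are touched and the TLB afterwards, so no stale
 * translations survive.
 */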
void vmfree_area_pages(unsigned long address, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset_k(address);
        flush_cache_all();
        do {
                free_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_all();
}
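
/*
 * Populate one PTE page: allocate a fresh physical page for every
 * slot covering [address, address + size) within this pmd and map
 * it with the given protection.  Finding a PTE already in place is
 * a bug, since the caller owns this virtual range exclusively.
 */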
static inline int alloc_area_pte (pte_t * pte, unsigned long address,
                        unsigned long size, int gfp_mask, pgprot_t prot)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                struct page * page;
                if (!pte_none(*pte))
                        printk(KERN_ERR "alloc_area_pte: page already exists\n");
                page = alloc_page(gfp_mask);
                if (!page)
                        return -ENOMEM;
                set_pte(pte, mk_pte(page, prot));
                address += PAGE_SIZE;
                pte++;
        } while (address < end);
        return 0;
}
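
/*
 * Allocate PTE pages for [address, address + size) within this pgd
 * and populate them via alloc_area_pte().
 */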
static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, int gfp_mask, pgprot_t prot)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                if (alloc_area_pte(pte, address, end - address, gfp_mask, prot))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}
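
/*
 * Back the kernel virtual range [address, address + size) with
 * freshly allocated pages.  The big kernel lock serializes the page
 * table build-up; on failure the partially built mapping is left for
 * the caller to tear down with vmfree_area_pages() (__vmalloc() does
 * this via vfree()).
 */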
inline int vmalloc_area_pages (unsigned long address, unsigned long size,
                               int gfp_mask, pgprot_t prot)
{
        pgd_t * dir;
        unsigned long end = address + size;
        int ret;

        dir = pgd_offset_k(address);
        flush_cache_all();
        lock_kernel();
        do {
                pmd_t *pmd;

                pmd = pmd_alloc_kernel(dir, address);
                ret = -ENOMEM;
                if (!pmd)
                        break;

                ret = -ENOMEM;
                if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot))
                        break;

                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;

                ret = 0;
        } while (address && (address < end));
        unlock_kernel();
        flush_tlb_all();
        return ret;
}
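
/*
 * Carve a free virtual range of the requested size out of
 * [VMALLOC_START, VMALLOC_END) and link it into the address-sorted
 * vmlist.  One extra page is added to the size so that every area is
 * followed by an unmapped guard page, which turns linear overruns
 * into faults instead of silent corruption.
 */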
struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
{
        unsigned long addr;
        struct vm_struct **p, *tmp, *area;

        area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;
        size += PAGE_SIZE;
        addr = VMALLOC_START;
        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
                if ((size + addr) < addr) {
                        write_unlock(&vmlist_lock);
                        kfree(area);
                        return NULL;
                }
                if (size + addr < (unsigned long) tmp->addr)
                        break;
                addr = tmp->size + (unsigned long) tmp->addr;
                if (addr > VMALLOC_END-size) {
                        write_unlock(&vmlist_lock);
                        kfree(area);
                        return NULL;
                }
        }
        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size;
        area->next = *p;
        *p = area;
        write_unlock(&vmlist_lock);
        return area;
}
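
/*
 * Release an area previously returned by __vmalloc(): unlink its
 * vm_struct from vmlist, tear down the page tables and free the
 * backing pages.  Passing a pointer that is not page aligned, or
 * that does not match any vm area, is reported and ignored.
 */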
void vfree(void * addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        if ((PAGE_SIZE-1) & (unsigned long) addr) {
                printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                return;
        }
        write_lock(&vmlist_lock);
        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
                        vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
                        write_unlock(&vmlist_lock);
                        kfree(tmp);
                        return;
                }
        }
        write_unlock(&vmlist_lock);
        printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
}
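
/*
 * Allocate a virtually contiguous, page-aligned region: reserve a
 * virtual range with get_vm_area(), then back it page by page with
 * vmalloc_area_pages().  The individual pages need not be physically
 * contiguous, which is what lets this succeed for large sizes when
 * kmalloc() would fail.  Callers normally go through the vmalloc()
 * wrapper in <linux/vmalloc.h>; a sketch of typical use:
 *
 *      buf = vmalloc(64 * 1024);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      vfree(buf);
 */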
void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot)
{
        void * addr;
        struct vm_struct *area;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages) {
                BUG();
                return NULL;
        }
        area = get_vm_area(size, VM_ALLOC);
        if (!area)
                return NULL;
        addr = area->addr;
        if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask, prot)) {
                vfree(addr);
                return NULL;
        }
        return addr;
}
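
/*
 * Copy up to count bytes of vmalloc space starting at addr into buf,
 * for readers of kernel virtual memory such as /dev/kmem.  Gaps
 * between vm areas (including the guard pages) read back as '\0'
 * rather than faulting.  Returns the number of bytes copied.
 */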
long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}