/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 */

#include <linux/malloc.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
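
/*
 * vmlist is the list of currently allocated vm areas, kept sorted by
 * address; vmlist_lock protects it.  Lookups (e.g. vread()) take the
 * lock for reading, allocation and freeing take it for writing.
 */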
rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct * vmlist = NULL;
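
/*
 * Clear the kernel PTEs for one pmd's slice of [address, address+size)
 * and release the backing pages.  Invalid and reserved pages are left
 * alone; a swapped-out entry in a kernel page table should never
 * happen, so it is only reported.
 */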
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t page = *pte;
		pte_clear(pte);
		address += PAGE_SIZE;
		pte++;
		if (pte_none(page))
			continue;
		if (pte_present(page)) {
			struct page *ptpage = pte_page(page);
			if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
				__free_page(ptpage);
			continue;
		}
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (address < end);
}
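
/*
 * Walk the pmd entries under one page directory entry and hand each
 * slice of the range down to free_area_pte().
 */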
static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		free_area_pte(pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}
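
/*
 * Unmap and free every page mapped in [address, address+size).  Kernel
 * mappings change here, so the caches are flushed before the walk and
 * the TLBs afterwards.
 */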
void vmfree_area_pages(unsigned long address, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = address + size;

	dir = pgd_offset_k(address);
	flush_cache_all();
	do {
		free_area_pmd(dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
}
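
/*
 * Allocate one page for each PTE slot in this pmd's slice of the range
 * and install it with the given protection.  A slot that is already
 * live is reported (and then overwritten).  On -ENOMEM the pages
 * installed so far are left for the caller to clean up; see __vmalloc().
 */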
static inline int alloc_area_pte (pte_t * pte, unsigned long address,
			unsigned long size, int gfp_mask, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		struct page * page;
		if (!pte_none(*pte))
			printk(KERN_ERR "alloc_area_pte: page already exists\n");
		page = alloc_page(gfp_mask);
		if (!page)
			return -ENOMEM;
		set_pte(pte, mk_pte(page, prot));
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
	return 0;
}
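
/*
 * Allocate page tables as needed for one pgd entry's slice of the
 * range, then populate them via alloc_area_pte().
 */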
static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, int gfp_mask, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		if (alloc_area_pte(pte, address, end - address, gfp_mask, prot))
			return -ENOMEM;
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}
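
/*
 * Back the virtual range [address, address+size) with freshly allocated
 * pages.  If pmd_alloc_kernel() had to install a new page table, the
 * changed pgd entry is propagated with set_pgdir() so that every
 * process's page directory sees the new kernel mapping.
 */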
inline int vmalloc_area_pages (unsigned long address, unsigned long size,
			int gfp_mask, pgprot_t prot)
{
	pgd_t * dir;
	unsigned long end = address + size;

	dir = pgd_offset_k(address);
	flush_cache_all();
	do {
		pmd_t *pmd;
		pgd_t olddir = *dir;

		pmd = pmd_alloc_kernel(dir, address);
		if (!pmd)
			return -ENOMEM;
		if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot))
			return -ENOMEM;
		if (pgd_val(olddir) != pgd_val(*dir))
			set_pgdir(address, *dir);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return 0;
}
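
/*
 * Reserve a free stretch of the vmalloc address range, first-fit, and
 * link the new descriptor into the sorted vmlist.  One extra unmapped
 * page is included in area->size as a guard page, so that overruns
 * fault instead of silently corrupting the next area.
 */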
struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = VMALLOC_START;
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
		if (size + addr < (unsigned long) tmp->addr)
			break;
		addr = tmp->size + (unsigned long) tmp->addr;
		if (addr > VMALLOC_END-size) {
			write_unlock(&vmlist_lock);
			kfree(area);
			return NULL;
		}
	}
	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size + PAGE_SIZE;
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);
	return area;
}
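
/*
 * Release an area obtained from get_vm_area(): unlink it from vmlist,
 * unmap and free its pages, and free the descriptor itself.  NULL is
 * ignored; a pointer that is not page-aligned cannot be a vmalloc
 * address and is rejected with a complaint.
 */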
void vfree(void * addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	if ((PAGE_SIZE-1) & (unsigned long) addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
			kfree(tmp);
			write_unlock(&vmlist_lock);
			return;
		}
	}
	write_unlock(&vmlist_lock);
	printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
}
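
/*
 * Allocate enough pages to cover @size bytes and map them into
 * contiguous kernel virtual space.  A zero-sized request, or one larger
 * than physical memory, is a caller bug.
 *
 * Callers normally go through the static inline wrappers in
 * <linux/vmalloc.h> rather than calling this directly; roughly (a
 * sketch of the wrapper, which lives in that header, not here):
 *
 *	static inline void * vmalloc (unsigned long size)
 *	{
 *		return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 *	}
 */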
void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot)
{
	void * addr;
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages) {
		BUG();
		return NULL;
	}
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		BUG();
		return NULL;
	}
	addr = area->addr;
	if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask, prot)) {
		vfree(addr);
		BUG();
		return NULL;
	}
	return addr;
}
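
/*
 * Copy up to @count bytes of the vmalloc address range, starting at
 * @addr, into @buf (used e.g. for /dev/kmem reads of this range).
 * Gaps between areas read back as zeroes and each area's trailing
 * guard page is skipped.  Returns the number of bytes placed in @buf.
 */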
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}