/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */
#include <linux/malloc.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
struct vm_struct * vmlist = NULL;
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t page = *pte;
                pte_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page)) {
                        unsigned long map_nr = pte_pagenr(page);
                        if (map_nr < max_mapnr)
                                __free_page(mem_map + map_nr);
                        continue;
                }
                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
        } while (address < end);
}
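
/*
 * Worked example (added commentary, with illustrative numbers assuming
 * classic two-level i386 paging: PAGE_SIZE = 4KB, PMD_SIZE = 4MB).
 * For a call with address 0xc8001000 and size 0x3000, the masking
 * above reduces "address" to its offset within the pmd span (0x1000),
 * "end" becomes 0x4000, and the loop clears exactly three ptes without
 * ever walking past the page table this pmd maps.
 */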
static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
{
        pmd_t * pmd;
        unsigned long end;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                free_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}
void vmfree_area_pages(unsigned long address, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset_k(address);
        flush_cache_all();
        do {
                free_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_all();
}
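
/*
 * Note (added commentary): the ordering of the two flushes matters.
 * flush_cache_all() runs while the mappings still exist, which is what
 * virtually-indexed caches need, and flush_tlb_all() runs only after
 * the page tables are torn down, so no CPU can keep using a stale
 * translation for a page that has already been freed.
 */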
static inline int alloc_area_pte(pte_t * pte, unsigned long address, unsigned long size)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                struct page * page;
                if (!pte_none(*pte))
                        printk(KERN_ERR "alloc_area_pte: page already exists\n");
                page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
                if (!page)
                        return -ENOMEM;
                set_pte(pte, mk_pte(page, PAGE_KERNEL));
                address += PAGE_SIZE;
                pte++;
        } while (address < end);
        return 0;
}
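
/*
 * Note (added commentary): __GFP_HIGHMEM is what lets vmalloc() use
 * BIGMEM pages.  A highmem page has no permanent kernel-virtual
 * mapping of its own, but that is harmless here because set_pte()
 * above gives it one inside the vmalloc range.
 */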
static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                if (alloc_area_pte(pte, address, end - address))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}
int vmalloc_area_pages(unsigned long address, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset_k(address);
        flush_cache_all();
        do {
                pmd_t *pmd;
                pgd_t olddir = *dir;

                pmd = pmd_alloc_kernel(dir, address);
                if (!pmd)
                        return -ENOMEM;
                if (alloc_area_pmd(pmd, address, end - address))
                        return -ENOMEM;
                if (pgd_val(olddir) != pgd_val(*dir))
                        set_pgdir(address, *dir);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_all();
        return 0;
}
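
/*
 * Note (added commentary): the olddir/set_pgdir() comparison above
 * catches the case where pmd_alloc_kernel() installed a brand-new
 * entry in the kernel page directory.  On architectures where every
 * process carries its own copy of the kernel pgd entries, set_pgdir()
 * propagates the new entry to all page directories in the system.
 */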
struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
{
        unsigned long addr;
        struct vm_struct **p, *tmp, *area;

        area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;
        addr = VMALLOC_START;
        for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
                if (size + addr < (unsigned long) tmp->addr)
                        break;
                addr = tmp->size + (unsigned long) tmp->addr;
                if (addr > VMALLOC_END-size) {
                        kfree(area);
                        return NULL;
                }
        }
        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size + PAGE_SIZE;  /* leave an unmapped guard page */
        area->next = *p;
        *p = area;
        return area;
}
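
/*
 * Example (not part of this file): a minimal sketch of a caller that
 * only wants an address range, without backing pages, in the style of
 * the arch ioremap() implementations.  The function name and the flag
 * value are hypothetical; only get_vm_area() comes from this file.
 */
#if 0
static void * reserve_vm_range(unsigned long size)
{
        struct vm_struct * area;

        area = get_vm_area(PAGE_ALIGN(size), 0 /* hypothetical flags */);
        if (!area)
                return NULL;
        /* area->addr .. area->addr + area->size - PAGE_SIZE is now
           reserved; the extra PAGE_SIZE acts as an unmapped guard page. */
        return area->addr;
}
#endif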
void vfree(void * addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        if ((PAGE_SIZE-1) & (unsigned long) addr) {
                printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                return;
        }
        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
                        vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
                        kfree(tmp);
                        return;
                }
        }
        printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
}
void * vmalloc(unsigned long size)
{
        void * addr;
        struct vm_struct *area;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > max_mapnr) {
                BUG();
                return NULL;
        }
        area = get_vm_area(size, VM_ALLOC);
        if (!area) {
                BUG();
                return NULL;
        }
        addr = area->addr;
        if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size)) {
                vfree(addr);
                BUG();
                return NULL;
        }
        return addr;
}
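
/*
 * Example (not part of this file): a minimal sketch of typical driver
 * usage.  The names and the size are hypothetical.  vmalloc() can
 * sleep, so this must not run from interrupt context, and the memory
 * it returns is virtually but not physically contiguous.
 */
#if 0
static void * my_table;

static int my_init(void)
{
        my_table = vmalloc(128 * 1024);
        if (!my_table)
                return -ENOMEM;
        memset(my_table, 0, 128 * 1024);
        return 0;
}

static void my_cleanup(void)
{
        vfree(my_table);        /* must be the pointer vmalloc() returned */
        my_table = NULL;
}
#endif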
long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        put_user('\0', buf);
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        put_user(*addr, buf);
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        return buf - buf_start;
}
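
/*
 * Example (not part of this file): vread() copies with put_user(), so
 * "buf" must point into user space.  A reader in the style of
 * /proc/kcore could drive it like this; the function name and the
 * file_operations wiring are hypothetical.
 */
#if 0
static ssize_t my_vmalloc_read(struct file * file, char * buf,
                               size_t count, loff_t * ppos)
{
        long copied;

        copied = vread(buf, (char *) VMALLOC_START + *ppos, count);
        *ppos += copied;
        return copied;
}
#endif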