/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 */
#include <linux/malloc.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct * vmlist = NULL;
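
/*
 * Lowest level of the unmap walk: clear each pte in the range covered
 * by this pmd entry and free the page it mapped.  Reserved pages and
 * page frames beyond max_mapnr are left alone; a swapped-out entry in
 * a kernel page table can never be legitimate, so it is reported.
 */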
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t page;
                page = ptep_get_and_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page)) {
                        unsigned long map_nr = pte_pagenr(page);
                        if ((map_nr < max_mapnr) &&
                            (!PageReserved(mem_map + map_nr)))
                                __free_page(mem_map + map_nr);
                        continue;
                }
                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
        } while (address < end);
}
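
/*
 * Middle level of the unmap walk: iterate over the pmd entries
 * covering [address, address+size) and hand each piece of the range
 * to free_area_pte().
 */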
static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
{
        pmd_t * pmd;
        unsigned long end;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                free_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}
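
/*
 * Tear down the kernel mappings for [address, address+size), flushing
 * caches before the walk and the TLB afterwards.  The extra "address"
 * test in the loop condition guards against wrap-around at the top of
 * the address space.
 */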
void vmfree_area_pages(unsigned long address, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset_k(address);
        flush_cache_all();
        do {
                free_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_all();
}
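
/*
 * Lowest level of the allocation walk: allocate one page for every
 * pte slot in the range and map it with PAGE_KERNEL protection.
 * An already-present pte indicates a bookkeeping bug and is logged.
 */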
static inline int alloc_area_pte (pte_t * pte, unsigned long address,
                        unsigned long size, int gfp_mask)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                struct page * page;
                if (!pte_none(*pte))
                        printk(KERN_ERR "alloc_area_pte: page already exists\n");
                page = alloc_page(gfp_mask);
                if (!page)
                        return -ENOMEM;
                set_pte(pte, mk_pte(page, PAGE_KERNEL));
                address += PAGE_SIZE;
                pte++;
        } while (address < end);
        return 0;
}
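
/*
 * Middle level of the allocation walk: make sure a pte table exists
 * for each pmd entry in the range, then populate it through
 * alloc_area_pte().
 */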
static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, int gfp_mask)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                if (alloc_area_pte(pte, address, end - address, gfp_mask))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}
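
/*
 * Build kernel page tables for [address, address+size).  If the walk
 * instantiates a new pgd entry, set_pgdir() propagates it into the
 * page directories of all processes.
 */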
inline int vmalloc_area_pages (unsigned long address,
                               unsigned long size, int gfp_mask)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset_k(address);
        flush_cache_all();
        do {
                pgd_t olddir = *dir;
                pmd_t *pmd;

                pmd = pmd_alloc_kernel(dir, address);
                if (!pmd)
                        return -ENOMEM;
                if (alloc_area_pmd(pmd, address, end - address, gfp_mask))
                        return -ENOMEM;
                if (pgd_val(olddir) != pgd_val(*dir))
                        set_pgdir(address, *dir);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_all();
        return 0;
}
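
/*
 * Reserve a free stretch of the vmalloc address range and record it
 * on vmlist.  Each area is sized one page larger than requested so
 * that an unmapped guard page separates consecutive areas and turns
 * overruns into faults.
 */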
struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
{
        unsigned long addr;
        struct vm_struct **p, *tmp, *area;

        area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;
        addr = VMALLOC_START;
        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
                if (size + addr < (unsigned long) tmp->addr)
                        break;
                addr = tmp->size + (unsigned long) tmp->addr;
                if (addr > VMALLOC_END-size) {
                        write_unlock(&vmlist_lock);
                        kfree(area);
                        return NULL;
                }
        }
        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size + PAGE_SIZE;
        area->next = *p;
        *p = area;
        write_unlock(&vmlist_lock);
        return area;
}
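
/*
 * Release an area previously set up through __vmalloc().  The address
 * must be page aligned and must match the start of a recorded area.
 */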
void vfree(void * addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        if ((PAGE_SIZE-1) & (unsigned long) addr) {
                printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                return;
        }
        write_lock(&vmlist_lock);
        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
                        vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
                        kfree(tmp);
                        write_unlock(&vmlist_lock);
                        return;
                }
        }
        write_unlock(&vmlist_lock);
        printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
}
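
/*
 * Allocate virtually contiguous memory backed by individually
 * allocated (possibly physically scattered) pages.  The requested
 * size is rounded up to whole pages and sanity-checked against the
 * amount of physical memory in the machine.
 */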
void * __vmalloc (unsigned long size, int gfp_mask)
{
        void * addr;
        struct vm_struct *area;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages) {
                BUG();
                return NULL;
        }
        area = get_vm_area(size, VM_ALLOC);
        if (!area) {
                BUG();
                return NULL;
        }
        addr = area->addr;
        if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask)) {
                vfree(addr);
                BUG();
                return NULL;
        }
        return addr;
}
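
/*
 * Copy up to count bytes from the vmalloc address range into buf,
 * taking the area list into account; holes between areas read back
 * as '\0' bytes.  Returns the number of bytes copied.
 */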
long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}
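
/*
 * Illustrative sketch (not part of the original file): how a caller
 * typically uses the interface above.  The usual entry point is the
 * vmalloc() wrapper in <linux/vmalloc.h>, which passes
 * GFP_KERNEL | __GFP_HIGHMEM; calling __vmalloc() directly with plain
 * GFP_KERNEL, as below, is an assumption made for the example.
 *
 *      void *buf = __vmalloc(64 * 1024, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      ... use 64 KiB of virtually contiguous memory ...
 *      vfree(buf);
 *
 * vfree() must be passed the exact address that __vmalloc() returned,
 * since it is matched against the recorded vm area list.
 */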