/*
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 */

#include <linux/malloc.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct * vmlist = NULL;
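
/*
 * For reference, the descriptor that vmlist chains together is declared in
 * include/linux/vmalloc.h; roughly (a sketch from memory, not authoritative --
 * see the header for the exact definition):
 *
 *      struct vm_struct {
 *              unsigned long flags;
 *              void * addr;             // start of the virtually contiguous area
 *              unsigned long size;      // mapped size plus one guard page
 *              struct vm_struct * next;
 *      };
 *
 * vmlist is a singly linked list of these, sorted by address and protected
 * by vmlist_lock.
 */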
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t page;
                page = ptep_get_and_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page)) {
                        struct page *ptpage = pte_page(page);
                        if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
                                __free_page(ptpage);
                        continue;
                }
                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
        } while (address < end);
}
static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
{
        pmd_t * pmd;
        unsigned long end;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                free_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}
void vmfree_area_pages(unsigned long address, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset_k(address);
        flush_cache_all();
        do {
                free_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_all();
}
static inline int alloc_area_pte (pte_t * pte, unsigned long address,
                        unsigned long size, int gfp_mask, pgprot_t prot)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                struct page * page;
                if (!pte_none(*pte))
                        printk(KERN_ERR "alloc_area_pte: page already exists\n");
                page = alloc_page(gfp_mask);
                if (!page)
                        return -ENOMEM;
                set_pte(pte, mk_pte(page, prot));
                address += PAGE_SIZE;
                pte++;
        } while (address < end);
        return 0;
}
static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, int gfp_mask, pgprot_t prot)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                if (alloc_area_pte(pte, address, end - address, gfp_mask, prot))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}
inline int vmalloc_area_pages (unsigned long address, unsigned long size,
                               int gfp_mask, pgprot_t prot)
{
        pgd_t * dir;
        unsigned long end = address + size;
        int ret;

        dir = pgd_offset_k(address);
        flush_cache_all();
        do {
                pmd_t *pmd;
                pgd_t olddir = *dir;

                ret = -ENOMEM;
                pmd = pmd_alloc_kernel(dir, address);
                if (!pmd)
                        break;

                if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot))
                        break;

                /* pmd_alloc_kernel() may have instantiated a new pgd entry;
                   propagate it to every process' page directory. */
                if (pgd_val(olddir) != pgd_val(*dir))
                        set_pgdir(address, *dir);

                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;

                ret = 0;
        } while (address && (address < end));
        flush_tlb_all();
        return ret;
}
struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
{
        unsigned long addr;
        struct vm_struct **p, *tmp, *area;

        area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;
        addr = VMALLOC_START;
        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
                if (size + addr < (unsigned long) tmp->addr)
                        break;
                addr = tmp->size + (unsigned long) tmp->addr;
                if (addr > VMALLOC_END-size) {
                        write_unlock(&vmlist_lock);
                        kfree(area);
                        return NULL;
                }
        }
        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size + PAGE_SIZE;
        area->next = *p;
        *p = area;
        write_unlock(&vmlist_lock);
        return area;
}
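
/*
 * Note that get_vm_area() records size + PAGE_SIZE: the extra page is never
 * mapped by vmalloc_area_pages(), so it acts as an unmapped guard page that
 * turns a sequential overrun of one vmalloc'd area into a fault instead of
 * silent corruption of the next area (vread() below subtracts PAGE_SIZE for
 * the same reason).
 */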
void vfree(void * addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        if ((PAGE_SIZE-1) & (unsigned long) addr) {
                printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                return;
        }
        write_lock(&vmlist_lock);
        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
                        vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
                        write_unlock(&vmlist_lock);
                        kfree(tmp);
                        return;
                }
        }
        write_unlock(&vmlist_lock);
        printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
}
void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot)
{
        void * addr;
        struct vm_struct *area;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages) {
                BUG();
                return NULL;
        }
        area = get_vm_area(size, VM_ALLOC);
        if (!area)
                return NULL;
        addr = area->addr;
        if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask, prot)) {
                vfree(addr);
                return NULL;
        }
        return addr;
}
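
/*
 * __vmalloc() is normally reached through the small wrappers in
 * include/linux/vmalloc.h, which only choose the gfp mask and protection.
 * Roughly (a sketch from the 2.4 header, not a definitive copy):
 *
 *      static inline void * vmalloc(unsigned long size)
 *      {
 *              return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 *      }
 *
 *      static inline void * vmalloc_dma(unsigned long size)
 *      {
 *              return __vmalloc(size, GFP_KERNEL | GFP_DMA, PAGE_KERNEL);
 *      }
 *
 *      static inline void * vmalloc_32(unsigned long size)
 *      {
 *              return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
 *      }
 */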
long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                /* Zero-fill the hole before this area starts. */
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                /* Copy out the mapped part of the area (excluding the guard page). */
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}
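
/*
 * Typical use from a driver or filesystem (illustrative only; my_buf and
 * MY_BUF_SIZE are made-up names): the returned memory is virtually
 * contiguous but not physically contiguous, so it must not be handed to
 * hardware that expects physically contiguous buffers.
 *
 *      char *my_buf = vmalloc(MY_BUF_SIZE);
 *      if (!my_buf)
 *              return -ENOMEM;
 *      memset(my_buf, 0, MY_BUF_SIZE);
 *      ...
 *      vfree(my_buf);
 */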