#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/kernel.h>

#ifdef __KERNEL__

#include <linux/string.h>

extern unsigned long max_mapnr;
extern unsigned long num_physpages;
extern void * high_memory;

#include <asm/page.h>
#include <asm/atomic.h>

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* VM area parameters */
	unsigned long vm_start;
	unsigned long vm_end;
	pgprot_t vm_page_prot;
	unsigned short vm_flags;
	struct vm_area_struct *vm_next;
	struct vm_area_struct **vm_pprev;

	/* For areas with an inode, the list inode->i_mmap, for shm areas,
	 * the list of attaches, otherwise unused.
	 */
	struct vm_area_struct *vm_next_share;
	struct vm_area_struct **vm_pprev_share;

	struct vm_operations_struct * vm_ops;
	unsigned long vm_offset;
	struct file * vm_file;
	unsigned long vm_pte;		/* shared mem */
};
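
/*
 * Illustrative sketch (not part of the original header): each task's
 * VM areas are kept on a singly linked list headed at mm->mmap and
 * chained through vm_next, sorted by address.  count_vmas() is a
 * hypothetical helper, shown only to make that linkage concrete.
 */
static inline int count_vmas(struct mm_struct * mm)
{
	struct vm_area_struct * vma;
	int count = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		count++;
	return count;
}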

#define VM_READ		0x0001	/* currently active flags */
#define VM_WRITE	0x0002
#define VM_EXEC		0x0004
#define VM_SHARED	0x0008

#define VM_MAYREAD	0x0010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x0020
#define VM_MAYEXEC	0x0040
#define VM_MAYSHARE	0x0080

#define VM_GROWSDOWN	0x0100	/* general info on the segment */
#define VM_GROWSUP	0x0200
#define VM_SHM		0x0400	/* shared memory area, don't swap out */
#define VM_DENYWRITE	0x0800	/* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE	0x1000
#define VM_LOCKED	0x2000
#define VM_IO		0x4000	/* Memory mapped I/O or similar */

#define VM_STACK_FLAGS	0x0177
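
/*
 * Worked decomposition (added for clarity): VM_STACK_FLAGS is simply
 * the OR of the flags a stack segment needs:
 *
 *	VM_READ | VM_WRITE | VM_EXEC          = 0x0007
 *	VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC = 0x0070
 *	VM_GROWSDOWN                          = 0x0100
 *	                                        ------
 *	                                        0x0177
 */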

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];
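
/*
 * Illustrative sketch (not part of the original header): the low four
 * vm_flags bits (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED) index
 * protection_map to yield the pgprot_t used when a mapping's page
 * table entries are set up.  vm_flags_to_prot() is a hypothetical helper.
 */
static inline pgprot_t vm_flags_to_prot(unsigned short vm_flags)
{
	return protection_map[vm_flags & 0x0f];
}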

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
	void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
	int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
	void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
	unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
	unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
		unsigned long page);
	int (*swapout)(struct vm_area_struct *, unsigned long, pte_t *);
	pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
};
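
/*
 * Illustrative sketch (not part of the original header): a driver that
 * only wants to supply pages on demand fills in nopage and leaves the
 * rest NULL; the exdrv_* names below are hypothetical.  Returning 0
 * from nopage indicates the fault could not be satisfied.
 */
static unsigned long exdrv_nopage(struct vm_area_struct * area,
				  unsigned long address, int write_access)
{
	return 0;	/* no page available for this fault */
}

static struct vm_operations_struct exdrv_vm_ops = {
	NULL,		/* open */
	NULL,		/* close */
	NULL,		/* unmap */
	NULL,		/* protect */
	NULL,		/* sync */
	NULL,		/* advise */
	exdrv_nopage,	/* nopage */
};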

/*
 * Try to keep the most commonly accessed fields in single cache lines
 * here (16 bytes or greater). This ordering should be particularly
 * beneficial on 32-bit processors.
 *
 * The first line is data used in page cache lookup, the second line
 * is used for linear searches (eg. clock algorithm scans).
 */
typedef struct page {
	/* these must be first (free area handling) */
	struct page *next;
	struct page *prev;
	struct inode *inode;
	unsigned long offset;
	struct page *next_hash;
	atomic_t count;
	unsigned long flags;	/* atomic flags, some possibly updated asynchronously */
	struct wait_queue *wait;
	struct page **pprev_hash;
	struct buffer_head * buffers;
	unsigned long map_nr;	/* page->map_nr == page - mem_map */
} mem_map_t;

/* Page flag bit values */
#define PG_locked		 0
#define PG_error		 1
#define PG_referenced		 2
#define PG_uptodate		 3
#define PG_free_after		 4
#define PG_decr_after		 5
#define PG_swap_unlock_after	 6
#define PG_DMA			 7
#define PG_Slab			 8
#define PG_swap_cache		 9
#define PG_reserved		31

/* Make it prettier to test the above... */
#define PageLocked(page)	(test_bit(PG_locked, &(page)->flags))
#define PageError(page)		(test_bit(PG_error, &(page)->flags))
#define PageReferenced(page)	(test_bit(PG_referenced, &(page)->flags))
#define PageUptodate(page)	(test_bit(PG_uptodate, &(page)->flags))
#define PageFreeAfter(page)	(test_bit(PG_free_after, &(page)->flags))
#define PageDecrAfter(page)	(test_bit(PG_decr_after, &(page)->flags))
#define PageSwapUnlockAfter(page)	(test_bit(PG_swap_unlock_after, &(page)->flags))
#define PageDMA(page)		(test_bit(PG_DMA, &(page)->flags))
#define PageSlab(page)		(test_bit(PG_Slab, &(page)->flags))
#define PageSwapCache(page)	(test_bit(PG_swap_cache, &(page)->flags))
#define PageReserved(page)	(test_bit(PG_reserved, &(page)->flags))

#define PageSetSlab(page)	(set_bit(PG_Slab, &(page)->flags))
#define PageSetSwapCache(page)	(set_bit(PG_swap_cache, &(page)->flags))
#define PageTestandSetSwapCache(page)	\
			(test_and_set_bit(PG_swap_cache, &(page)->flags))

#define PageClearSlab(page)	(clear_bit(PG_Slab, &(page)->flags))
#define PageClearSwapCache(page)	(clear_bit(PG_swap_cache, &(page)->flags))

#define PageTestandClearSwapCache(page)	\
			(test_and_clear_bit(PG_swap_cache, &(page)->flags))
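
/*
 * Illustrative sketch (not part of the original header): typical use of
 * the accessors above - e.g. only pages that are unlocked, up to date
 * and not reserved are interesting to a reclaim scan.
 * page_is_reclaim_candidate() is a hypothetical helper.
 */
static inline int page_is_reclaim_candidate(struct page * page)
{
	return !PageLocked(page) && PageUptodate(page) && !PageReserved(page);
}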

/*
 * page->reserved denotes a page which must never be accessed (which
 * may not even be present).
 *
 * page->dma is set for those pages which lie in the range of
 * physical addresses capable of carrying DMA transfers.
 *
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page->count denotes a reference count.
 *   page->count == 0 means the page is free.
 *   page->count == 1 means the page is used for exactly one purpose
 *   (e.g. a private data page of one process).
 *
 * A page may be used for kmalloc() or anyone else who does a
 * get_free_page(). In this case the page->count is at least 1, and
 * all other fields are unused but should be 0 or NULL. The
 * management of this page is the responsibility of the one who uses
 * it.
 *
 * The other pages (we may call them "process pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A page may belong to an inode's memory mapping. In this case,
 * page->inode is the inode, and page->offset is the file offset
 * of the page (not necessarily a multiple of PAGE_SIZE).
 *
 * A page may have buffers allocated to it. In this case,
 * page->buffers is a circular list of these buffer heads. Else,
 * page->buffers == NULL.
 *
 * For pages belonging to inodes, the page->count is the number of
 * attaches, plus 1 if buffers are allocated to the page.
 *
 * All pages belonging to an inode make up a doubly linked list
 * inode->i_pages, using the fields page->next and page->prev. (These
 * fields are also used for freelist management when page->count == 0.)
 * There is also a hash table mapping (inode,offset) to the page
 * in memory if present. The lists for this hash table use the fields
 * page->next_hash and page->prev_hash.
 *
 * All process pages can do I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written to disk,
 * - private pages which have been modified may need to be swapped out
 *   to swap space and (later) to be read back into memory.
 * During disk I/O, page->locked is true. This bit is set before I/O
 * and reset when I/O completes. page->wait is a wait queue of all
 * tasks waiting for the I/O on this page to complete.
 * page->uptodate tells whether the page's contents are valid.
 * When a read completes, the page becomes uptodate, unless a disk I/O
 * error happened.
 * When a write completes, and page->free_after is true, the page is
 * freed without any further delay.
 *
 * For choosing which pages to swap out, inode pages carry a
 * page->referenced bit, which is set any time the system accesses
 * that page through the (inode,offset) hash table.
 * There is also the page->age counter, which implements a linear
 * decay (why not an exponential decay?), see swapctl.h.
 */

extern mem_map_t * mem_map;
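
/*
 * Illustrative sketch (not part of the original header): mem_map is the
 * flat array of struct page, one entry per physical page frame, so
 * page->map_nr and pointer arithmetic on mem_map are interchangeable.
 * MAP_NR() is the <asm/page.h> macro giving the frame number of a
 * kernel virtual address; addr_to_page() is a hypothetical helper.
 */
static inline struct page * addr_to_page(unsigned long addr)
{
	return mem_map + MAP_NR(addr);	/* and page->map_nr == page - mem_map */
}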

/*
 * This is timing-critical - most of the time in getting a new page
 * goes to clearing the page. If you want a page without the clearing
 * overhead, just use __get_free_page() directly..
 */
#define __get_free_page(gfp_mask) __get_free_pages((gfp_mask),0)
#define __get_dma_pages(gfp_mask, order) __get_free_pages((gfp_mask) | GFP_DMA,(order))
extern unsigned long FASTCALL(__get_free_pages(int gfp_mask, unsigned long gfp_order));

extern inline unsigned long get_free_page(int gfp_mask)
{
	unsigned long page;

	page = __get_free_page(gfp_mask);
	if (page)
		memset((void *) page, 0, PAGE_SIZE);
	return page;
}

/* memory.c & swap.c*/

#define free_page(addr) free_pages((addr),0)
extern void FASTCALL(free_pages(unsigned long addr, unsigned long order));
extern void FASTCALL(__free_page(struct page *));

extern void show_free_areas(void);
extern unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page,
	unsigned long address);

extern void free_page_tables(struct mm_struct * mm);
extern void clear_page_tables(struct task_struct * tsk);
extern int new_page_tables(struct task_struct * tsk);

extern void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);

extern void vmtruncate(struct inode * inode, unsigned long offset);
extern int handle_mm_fault(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long address, int write_access);
extern void make_pages_present(unsigned long addr, unsigned long end);

extern int pgt_cache_water[2];
extern int check_pgt_cache(void);

extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
extern void mem_init(unsigned long start_mem, unsigned long end_mem);
extern void show_mem(void);
extern void oom(struct task_struct * tsk);
extern void si_meminfo(struct sysinfo * val);

extern void vma_init(void);
extern void merge_segments(struct mm_struct *, unsigned long, unsigned long);
extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void exit_mmap(struct mm_struct *);
extern unsigned long get_unmapped_area(unsigned long, unsigned long);

extern unsigned long do_mmap(struct file *, unsigned long, unsigned long,
	unsigned long, unsigned long, unsigned long);
extern int do_munmap(unsigned long, size_t);

extern void remove_inode_page(struct page *);
extern unsigned long page_unuse(struct page *);
extern int shrink_mmap(int, int);
extern void truncate_inode_pages(struct inode *, unsigned long);
extern unsigned long get_cached_page(struct inode *, unsigned long, int);
extern void put_cached_page(unsigned long);

#define __GFP_WAIT	0x01
#define __GFP_LOW	0x02
#define __GFP_MED	0x04
#define __GFP_HIGH	0x08

#define __GFP_DMA	0x80

#define GFP_BUFFER	(__GFP_LOW | __GFP_WAIT)
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_USER	(__GFP_LOW | __GFP_WAIT)
#define GFP_KERNEL	(__GFP_MED | __GFP_WAIT)
#define GFP_NFS		(__GFP_HIGH | __GFP_WAIT)

/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA
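
/*
 * Illustrative sketch (not part of the original header): picking a GFP
 * mask.  GFP_KERNEL includes __GFP_WAIT and so may sleep; GFP_ATOMIC
 * never waits and is for interrupt or other non-blocking contexts.
 * scratch_buffer() is a hypothetical helper.
 */
static inline unsigned long scratch_buffer(int atomic_context)
{
	unsigned long buf;

	buf = get_free_page(atomic_context ? GFP_ATOMIC : GFP_KERNEL);
	/* Caller must free_page(buf) when done; 0 means the allocation failed. */
	return buf;
}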

/*
 * Decide if we should try to do some swapout..
 */
extern int free_memory_available(void);
extern struct task_struct * kswapd_task;

/* vma is the first one with address < vma->vm_end,
 * and even address < vma->vm_start. Have to extend vma. */
static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
{
	unsigned long grow;

	address &= PAGE_MASK;
	grow = vma->vm_start - address;
	if (vma->vm_end - address
	    > (unsigned long) current->rlim[RLIMIT_STACK].rlim_cur ||
	    (vma->vm_mm->total_vm << PAGE_SHIFT) + grow
	    > (unsigned long) current->rlim[RLIMIT_AS].rlim_cur)
		return -ENOMEM;
	vma->vm_start = address;
	vma->vm_offset -= grow;
	vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
	return 0;
}
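
/*
 * Illustrative sketch (not part of the original header): the pattern an
 * architecture's page-fault handler uses around expand_stack() - only a
 * VM_GROWSDOWN area that starts above the faulting address may be grown
 * down to cover it.  maybe_grow_stack() is a hypothetical helper.
 */
static inline int maybe_grow_stack(struct vm_area_struct * vma, unsigned long address)
{
	if (address >= vma->vm_start)
		return 0;				/* already covered */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return -EFAULT;				/* not a stack area */
	return expand_stack(vma, address);		/* -ENOMEM if over the rlimits */
}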

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
static inline struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
{
	struct vm_area_struct *vma = NULL;

	if (mm) {
		/* Check the cache first. */
		vma = mm->mmap_cache;
		if (!vma || (vma->vm_end <= addr) || (vma->vm_start > addr)) {
			/* Cache miss: walk the address-sorted list. */
			vma = mm->mmap;
			while (vma && vma->vm_end <= addr)
				vma = vma->vm_next;
			mm->mmap_cache = vma;
		}
	}
	return vma;
}
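
/*
 * Illustrative sketch (not part of the original header): find_vma() only
 * guarantees addr < vm_end, so callers that need the address to actually
 * be mapped must also check vm_start.  find_covering_vma() is a
 * hypothetical helper.
 */
static inline struct vm_area_struct * find_covering_vma(struct mm_struct * mm, unsigned long addr)
{
	struct vm_area_struct * vma = find_vma(mm, addr);

	if (vma && addr < vma->vm_start)
		return NULL;		/* addr falls in the hole below vma */
	return vma;
}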

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none. Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}
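
/*
 * Illustrative sketch (not part of the original header): checking that an
 * address range is currently unmapped, e.g. before honouring a MAP_FIXED
 * request.  range_is_free() is a hypothetical helper.
 */
static inline int range_is_free(struct mm_struct * mm, unsigned long addr, unsigned long len)
{
	return find_vma_intersection(mm, addr, addr + len) == NULL;
}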

#define buffer_under_min()	((buffermem >> PAGE_SHIFT) * 100 < \
				buffer_mem.min_percent * num_physpages)
#define buffer_under_borrow()	((buffermem >> PAGE_SHIFT) * 100 < \
				buffer_mem.borrow_percent * num_physpages)
#define buffer_under_max()	((buffermem >> PAGE_SHIFT) * 100 < \
				buffer_mem.max_percent * num_physpages)
#define buffer_over_min()	((buffermem >> PAGE_SHIFT) * 100 > \
				buffer_mem.min_percent * num_physpages)
#define buffer_over_borrow()	((buffermem >> PAGE_SHIFT) * 100 > \
				buffer_mem.borrow_percent * num_physpages)
#define buffer_over_max()	((buffermem >> PAGE_SHIFT) * 100 > \
				buffer_mem.max_percent * num_physpages)
#define pgcache_under_min()	(page_cache_size * 100 < \
				page_cache.min_percent * num_physpages)
#define pgcache_under_borrow()	(page_cache_size * 100 < \
				page_cache.borrow_percent * num_physpages)
#define pgcache_under_max()	(page_cache_size * 100 < \
				page_cache.max_percent * num_physpages)
#define pgcache_over_min()	(page_cache_size * 100 > \
				page_cache.min_percent * num_physpages)
#define pgcache_over_borrow()	(page_cache_size * 100 > \
				page_cache.borrow_percent * num_physpages)
#define pgcache_over_max()	(page_cache_size * 100 > \
				page_cache.max_percent * num_physpages)
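
/*
 * Illustrative sketch (not part of the original header): how a reclaim
 * path might consult the watermark macros above - once the buffer cache
 * or the page cache has grown past its "borrow" share of physical
 * memory, it becomes a preferred source of pages to free.
 * caches_over_borrow() is a hypothetical helper; buffermem, buffer_mem,
 * page_cache_size and page_cache are declared elsewhere.
 */
static inline int caches_over_borrow(void)
{
	return buffer_over_borrow() || pgcache_over_borrow();
}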

#endif /* __KERNEL__ */