#include <linux/sched.h>
#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/string.h>

extern unsigned long max_mapnr;
extern unsigned long num_physpages;
extern void * high_memory;
extern int page_cluster;

#include <asm/page.h>
#include <asm/atomic.h>
/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */
/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* VM area parameters */
	unsigned long vm_start;
	unsigned long vm_end;

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next;

	pgprot_t vm_page_prot;
	unsigned short vm_flags;

	/* AVL tree of VM areas per task, sorted by address */
	short vm_avl_height;
	struct vm_area_struct * vm_avl_left;
	struct vm_area_struct * vm_avl_right;

	/* For areas with inode, the list inode->i_mmap, for shm areas,
	 * the list of attaches, otherwise unused.
	 */
	struct vm_area_struct *vm_next_share;
	struct vm_area_struct **vm_pprev_share;

	struct vm_operations_struct * vm_ops;
	unsigned long vm_offset;
	struct file * vm_file;
	unsigned long vm_pte;		/* shared mem */
};
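
/*
 * Illustrative sketch (not part of this header): the per-task VM areas
 * hang off mm->mmap as a singly linked list sorted by address, so a
 * walk over a task's mappings looks like this (print_vma_ranges is a
 * hypothetical helper):
 */
#if 0
static void print_vma_ranges(struct mm_struct * mm)
{
	struct vm_area_struct * vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		printk("vma %08lx-%08lx flags %04x\n",
		       vma->vm_start, vma->vm_end, vma->vm_flags);
}
#endif
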
#define VM_READ		0x0001	/* currently active flags */
#define VM_WRITE	0x0002
#define VM_EXEC		0x0004
#define VM_SHARED	0x0008

#define VM_MAYREAD	0x0010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x0020
#define VM_MAYEXEC	0x0040
#define VM_MAYSHARE	0x0080

#define VM_GROWSDOWN	0x0100	/* general info on the segment */
#define VM_GROWSUP	0x0200
#define VM_SHM		0x0400	/* shared memory area, don't swap out */
#define VM_DENYWRITE	0x0800	/* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE	0x1000
#define VM_LOCKED	0x2000
#define VM_IO		0x4000	/* Memory mapped I/O or similar */

#define VM_STACK_FLAGS	0x0177
/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];
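
/*
 * Illustrative sketch (not part of this header): the low four vm_flags
 * bits (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED) form the index, so setting
 * up an area's page protection is a table lookup:
 */
#if 0
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
#endif
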
/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
	void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
	int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
	void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
	unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
	unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
		unsigned long page);
	int (*swapout)(struct vm_area_struct *, struct page *);
	pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
};
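
/*
 * Illustrative sketch (not part of this header): a driver mapping its
 * device memory usually fills in only the hooks it needs; NULL entries
 * get default behaviour. my_dev_nopage is a hypothetical handler that
 * would return the page backing the faulting address (0 on failure).
 */
#if 0
static unsigned long my_dev_nopage(struct vm_area_struct * area,
				   unsigned long address, int write_access)
{
	return 0;	/* no page available */
}

static struct vm_operations_struct my_dev_vm_ops = {
	NULL,		/* open */
	NULL,		/* close */
	NULL,		/* unmap */
	NULL,		/* protect */
	NULL,		/* sync */
	NULL,		/* advise */
	my_dev_nopage,	/* nopage */
};
#endif
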
/*
 * Try to keep the most commonly accessed fields in single cache lines
 * here (16 bytes or greater). This ordering should be particularly
 * beneficial on 32-bit processors.
 *
 * The first line is data used in page cache lookup, the second line
 * is used for linear searches (eg. clock algorithm scans).
 */
typedef struct page {
	/* these must be first (free area handling) */
	struct page *next;
	struct page *prev;
	struct inode *inode;
	unsigned long offset;
	struct page *next_hash;
	atomic_t count;
	unsigned long flags;	/* atomic flags, some possibly updated asynchronously */
	wait_queue_head_t wait;
	struct page **pprev_hash;
	struct buffer_head * buffers;
} mem_map_t;
/* Page flag bit values */
#define PG_locked		 0
#define PG_error		 1
#define PG_referenced		 2
#define PG_dirty		 3
#define PG_uptodate		 4
#define PG_free_after		 5
#define PG_decr_after		 6
#define PG_swap_unlock_after	 7
#define PG_DMA			 8
#define PG_Slab			 9
#define PG_swap_cache		10
#define PG_skip			11
#define PG_reserved		31
/* Make it prettier to test the above... */
#define PageLocked(page)	(test_bit(PG_locked, &(page)->flags))
#define PageError(page)		(test_bit(PG_error, &(page)->flags))
#define PageReferenced(page)	(test_bit(PG_referenced, &(page)->flags))
#define PageDirty(page)		(test_bit(PG_dirty, &(page)->flags))
#define PageUptodate(page)	(test_bit(PG_uptodate, &(page)->flags))
#define PageFreeAfter(page)	(test_bit(PG_free_after, &(page)->flags))
#define PageDecrAfter(page)	(test_bit(PG_decr_after, &(page)->flags))
#define PageSwapUnlockAfter(page) (test_bit(PG_swap_unlock_after, &(page)->flags))
#define PageDMA(page)		(test_bit(PG_DMA, &(page)->flags))
#define PageSlab(page)		(test_bit(PG_Slab, &(page)->flags))
#define PageSwapCache(page)	(test_bit(PG_swap_cache, &(page)->flags))
#define PageReserved(page)	(test_bit(PG_reserved, &(page)->flags))

#define PageSetSlab(page)	(set_bit(PG_Slab, &(page)->flags))
#define PageSetSwapCache(page)	(set_bit(PG_swap_cache, &(page)->flags))

#define PageTestandSetDirty(page) \
			(test_and_set_bit(PG_dirty, &(page)->flags))
#define PageTestandSetSwapCache(page) \
			(test_and_set_bit(PG_swap_cache, &(page)->flags))

#define PageClearSlab(page)	(clear_bit(PG_Slab, &(page)->flags))
#define PageClearSwapCache(page) (clear_bit(PG_swap_cache, &(page)->flags))

#define PageTestandClearDirty(page) \
			(test_and_clear_bit(PG_dirty, &(page)->flags))
#define PageTestandClearSwapCache(page) \
			(test_and_clear_bit(PG_swap_cache, &(page)->flags))
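
/*
 * Illustrative sketch (not part of this header): the TestandSet/Clear
 * forms are atomic read-modify-write operations, so exactly one of any
 * number of racing callers sees the old bit value as clear:
 */
#if 0
	if (!PageTestandSetDirty(page)) {
		/* this caller was the first to dirty the page */
	}
#endif
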
/*
 * Various page->flags bits:
 *
 * PG_reserved is set for a page which must never be accessed (which
 * may not even be present).
 *
 * PG_DMA is set for those pages which lie in the range of
 * physical addresses capable of carrying DMA transfers.
 *
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page->count denotes a reference count.
 * page->count == 0 means the page is free.
 * page->count == 1 means the page is used for exactly one purpose
 * (e.g. a private data page of one process).
 *
 * A page may be used for kmalloc() or anyone else who does a
 * get_free_page(). In this case the page->count is at least 1, and
 * all other fields are unused but should be 0 or NULL. The
 * management of this page is the responsibility of the one who uses
 * it.
 *
 * The other pages (we may call them "process pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A page may belong to an inode's memory mapping. In this case,
 * page->inode is the pointer to the inode, and page->offset is the
 * file offset of the page (not necessarily a multiple of PAGE_SIZE).
 *
 * A page may have buffers allocated to it. In this case,
 * page->buffers is a circular list of these buffer heads. Else,
 * page->buffers == NULL.
 *
 * For pages belonging to inodes, the page->count is the number of
 * attaches, plus 1 if buffers are allocated to the page.
 *
 * All pages belonging to an inode make up a doubly linked list
 * inode->i_pages, using the fields page->next and page->prev. (These
 * fields are also used for freelist management when page->count==0.)
 * There is also a hash table mapping (inode,offset) to the page
 * in memory if present. The lists for this hash table use the fields
 * page->next_hash and page->pprev_hash.
 *
 * All process pages can do I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written to disk,
 * - private pages which have been modified may need to be swapped out
 *   to swap space and (later) to be read back into memory.
 * During disk I/O, PG_locked is used. This bit is set before I/O
 * and reset when I/O completes. page->wait is a wait queue of all
 * tasks waiting for the I/O on this page to complete.
 * PG_uptodate tells whether the page's contents is valid.
 * When a read completes, the page becomes uptodate, unless a disk I/O
 * error happened.
 * When a write completes, and PG_free_after is set, the page is
 * freed without any further delay.
 *
 * For choosing which pages to swap out, inode pages carry a
 * PG_referenced bit, which is set any time the system accesses
 * that page through the (inode,offset) hash table.
 *
 * PG_skip is used on sparc/sparc64 architectures to "skip" certain
 * parts of the address space.
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 */

extern mem_map_t * mem_map;
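
/*
 * Illustrative sketch (not part of this header): mem_map holds one
 * mem_map_t per physical page frame; MAP_NR() (from the per-arch page
 * headers) converts a kernel virtual address into an index into it.
 */
#if 0
	struct page *page = mem_map + MAP_NR(addr);

	if (!PageReserved(page))
		atomic_inc(&page->count);	/* take a reference */
#endif
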
/*
 * This is timing-critical - most of the time in getting a new page
 * goes to clearing the page. If you want a page without the clearing
 * overhead, just use __get_free_page() directly..
 */
#define __get_free_page(gfp_mask) __get_free_pages((gfp_mask),0)
#define __get_dma_pages(gfp_mask, order) __get_free_pages((gfp_mask) | GFP_DMA,(order))
extern unsigned long FASTCALL(__get_free_pages(int gfp_mask, unsigned long gfp_order));

extern inline unsigned long get_free_page(int gfp_mask)
{
	unsigned long page;

	page = __get_free_page(gfp_mask);
	if (page)
		clear_page(page);
	return page;
}
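
/*
 * Illustrative sketch (not part of this header): get_free_page()
 * returns a zeroed page, while __get_free_pages() with a non-zero
 * order returns 2^order contiguous pages without clearing them.
 */
#if 0
	unsigned long buf = __get_free_pages(GFP_KERNEL, 2);	/* 4 pages */

	if (buf) {
		/* ... use the buffer ... */
		free_pages(buf, 2);
	}
#endif
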
extern int low_on_memory;

/* memory.c & swap.c */
#define free_page(addr) free_pages((addr),0)
extern void FASTCALL(free_pages(unsigned long addr, unsigned long order));
extern void FASTCALL(__free_page(struct page *));

extern void show_free_areas(void);
extern unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page,
	unsigned long address);

extern void free_page_tables(struct mm_struct * mm);
extern void clear_page_tables(struct mm_struct *, unsigned long, int);
extern int new_page_tables(struct task_struct * tsk);

extern void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);

extern void vmtruncate(struct inode * inode, unsigned long offset);
extern int handle_mm_fault(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long address, int write_access);
extern void make_pages_present(unsigned long addr, unsigned long end);

extern int pgt_cache_water[2];
extern int check_pgt_cache(void);

extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
extern void mem_init(unsigned long start_mem, unsigned long end_mem);
extern void show_mem(void);
extern void oom(struct task_struct * tsk);
extern void si_meminfo(struct sysinfo * val);
/* mmap.c */
extern void vma_init(void);
extern void merge_segments(struct mm_struct *, unsigned long, unsigned long);
extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void build_mmap_avl(struct mm_struct *);
extern void exit_mmap(struct mm_struct *);
extern unsigned long get_unmapped_area(unsigned long, unsigned long);

extern unsigned long do_mmap(struct file *, unsigned long, unsigned long,
	unsigned long, unsigned long, unsigned long);
extern int do_munmap(unsigned long, size_t);

/* filemap.c */
extern void remove_inode_page(struct page *);
extern unsigned long page_unuse(struct page *);
extern int shrink_mmap(int, int);
extern void truncate_inode_pages(struct inode *, unsigned long);
extern unsigned long get_cached_page(struct inode *, unsigned long, int);
extern void put_cached_page(unsigned long);
#define __GFP_WAIT	0x01
#define __GFP_LOW	0x02
#define __GFP_MED	0x04
#define __GFP_HIGH	0x08
#define __GFP_IO	0x10
#define __GFP_SWAP	0x20

#define __GFP_DMA	0x80

#define GFP_BUFFER	(__GFP_LOW | __GFP_WAIT)
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_USER	(__GFP_LOW | __GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_MED | __GFP_WAIT | __GFP_IO)
#define GFP_NFS		(__GFP_HIGH | __GFP_WAIT | __GFP_IO)
#define GFP_KSWAPD	(__GFP_IO | __GFP_SWAP)

/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA
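
/*
 * Illustrative sketch (not part of this header): __GFP_WAIT means the
 * allocator may sleep, so GFP_KERNEL is for process context only;
 * GFP_ATOMIC never sleeps and is the safe choice at interrupt time.
 */
#if 0
	unsigned long p = __get_free_page(in_interrupt() ? GFP_ATOMIC
							 : GFP_KERNEL);
#endif
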
/* vma is the first one with address < vma->vm_end,
 * and even address < vma->vm_start. Have to extend vma. */
static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
{
	unsigned long grow;

	address &= PAGE_MASK;
	grow = vma->vm_start - address;
	if (vma->vm_end - address
	    > (unsigned long) current->rlim[RLIMIT_STACK].rlim_cur ||
	    (vma->vm_mm->total_vm << PAGE_SHIFT) + grow
	    > (unsigned long) current->rlim[RLIMIT_AS].rlim_cur)
		return -ENOMEM;
	vma->vm_start = address;
	vma->vm_offset -= grow;
	vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
	return 0;
}
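
/*
 * Illustrative sketch (not part of this header): a page fault handler
 * grows a VM_GROWSDOWN stack area down to a faulting address below
 * vm_start before retrying the access (bad_area is a hypothetical
 * error label):
 */
#if 0
	if ((vma->vm_flags & VM_GROWSDOWN) && address < vma->vm_start) {
		if (expand_stack(vma, address))
			goto bad_area;	/* -ENOMEM: over the rlimits */
	}
#endif
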
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none. Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm,start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}
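
/*
 * Illustrative sketch (not part of this header): checking whether any
 * existing mapping overlaps a candidate range before setting up a new
 * one:
 */
#if 0
	if (find_vma_intersection(mm, addr, addr + len))
		return -EINVAL;	/* range already (partly) mapped */
#endif
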
#define buffer_under_min()	((buffermem >> PAGE_SHIFT) * 100 < \
				buffer_mem.min_percent * num_physpages)
#define pgcache_under_min()	(page_cache_size * 100 < \
				page_cache.min_percent * num_physpages)
#endif /* __KERNEL__ */