/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);
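/*
 * Usage sketch (illustrative, not part of this header): callers such as
 * exit_mmap() in mm/mmap.c bound the teardown with floor/ceiling so page
 * tables shared with neighbouring regions are preserved, e.g.:
 *
 *	free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
 */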
extern void prep_compound_page(struct page *page, unsigned long order);
static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}
/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}
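/*
 * Example (a minimal sketch, not taken from this file): some internal
 * allocator paths hand out pages with _count == 0; a caller that wants
 * an ordinary refcounted page would do roughly:
 *
 *	struct page *page = ...;	// freshly prepared, _count == 0
 *	set_page_refcounted(page);	// now _count == 1
 *	...
 *	put_page(page);			// drop the reference, may free
 */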
static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);
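/*
 * Typical usage pattern (sketch only; error handling elided): a caller
 * such as page migration pulls the page off its LRU list, operates on
 * it in isolation, then returns it to the appropriate list:
 *
 *	if (isolate_lru_page(page) == 0) {	// 0 on success
 *		...				// page is off the LRU
 *		putback_lru_page(page);		// re-add to correct list
 *	}
 */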
extern void __free_pages_bootmem(struct page *page, unsigned int order);
/*
 * function for dealing with page's order in buddy system.
 * zone->lock is already acquired when we use these.
 * So, we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	return page_private(page);
}
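/*
 * Example (sketch, assuming zone->lock is held as noted above): the
 * buddy allocator reads the order stored in page_private() when
 * deciding whether a neighbouring free block can be merged:
 *
 *	if (PageBuddy(buddy) && page_order(buddy) == order) {
 *		...	// buddy is free and of matching order: merge
 *	}
 */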
extern int mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
extern void munlock_vma_pages_all(struct vm_area_struct *vma);
#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * unevictable_migrate_page() called only from migrate_page_copy() to
 * migrate unevictable flag to new page.
 * Note that the old page has been isolated from the LRU lists at this
 * point so we don't need to worry about LRU statistics.
 */
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
	if (TestClearPageUnevictable(old))
		SetPageUnevictable(new);
}
#else
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
}
#endif
#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * Called only in fault path via page_evictable() for a new page
 * to determine if it's being mapped into a LOCKED vma.
 * If so, mark page as mlocked.
 */
static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
{
	VM_BUG_ON(PageLRU(page));

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;

	SetPageMlocked(page);
	return 1;
}
/*
 * must be called with vma's mmap_sem held for read, and page locked.
 */
extern void mlock_vma_page(struct page *page);
/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void __clear_page_mlock(struct page *page);
static inline void clear_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page)))
		__clear_page_mlock(page);
}
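/*
 * Example (illustrative only): truncation unconditionally removes a page
 * from the pagecache, so it clears the flag up front instead of waiting
 * for lazy reclaim to notice:
 *
 *	clear_page_mlock(page);		// safe for any page, mlocked or not
 *	remove_from_page_cache(page);
 */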
/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page))
		SetPageMlocked(newpage);
}
#else /* CONFIG_UNEVICTABLE_LRU */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
	return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* CONFIG_UNEVICTABLE_LRU */
/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif
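/*
 * Usage sketch: an mm initialisation function that may run again at
 * memory hotplug time under SPARSEMEM is annotated with the alias,
 * e.g. in mm/page_alloc.c:
 *
 *	void __paginginit free_area_init_node(...);
 *
 * which expands to __meminit with SPARSEMEM and __init otherwise.
 */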
/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;
#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
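/*
 * Usage example (modelled on callers in mm/page_alloc.c; the message is
 * only emitted when mminit_loglevel is raised above the message's level):
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *		"initialising map node %d zone %lu pfns %lu -> %lu\n",
 *		nid, zone, start_pfn, end_pfn);
 */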
extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */
/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */
#define GUP_FLAGS_WRITE                  0x1
#define GUP_FLAGS_FORCE                  0x2
#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
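/*
 * The flags combine with bitwise OR. A sketch of a caller, mirroring how
 * get_user_pages() in mm/memory.c builds its flag word (variable names
 * here are illustrative):
 *
 *	int flags = GUP_FLAGS_WRITE;
 *	if (force)
 *		flags |= GUP_FLAGS_FORCE;
 *	ret = __get_user_pages(current, current->mm, start, len, flags,
 *			       pages, vmas);
 */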
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int len, int flags,
		     struct page **pages, struct vm_area_struct **vmas);

#endif /* __MM_INTERNAL_H */