/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include "internal.h"

int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when memory hotplug notifier runs, these
	 * pages reported as "can be isolated" should be isolated (freed)
	 * by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * Here, "immobile" means not-on-LRU pages. If there are more immobile
	 * pages than removable-by-driver pages reported by the notifier,
	 * isolation fails.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	/*
	 * If the pageblock was isolated, drain the per-CPU free lists so any
	 * of its pages still sitting there are returned to the buddy lists
	 * under MIGRATE_ISOLATE.
	 */
	if (!ret)
		drain_all_pages();
	return ret;
}

void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
	nr_pages = move_freepages_block(zone, page, migratetype);
	__mod_zone_freepage_state(zone, nr_pages, migratetype);
	set_pageblock_migratetype(page, migratetype);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}
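
/*
 * __first_valid_page() below returns the first page in the block whose pfn
 * is valid. pfn_valid_within() only performs a real pfn_valid() check when
 * CONFIG_HOLES_IN_ZONE is enabled; otherwise it evaluates to 1, so the loop
 * normally returns the first page straight away.
 */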
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;
	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- set the page-allocation type of a range of
 * pages to MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to restore on error recovery.
 *
 * Setting the page-allocation type to MIGRATE_ISOLATE means that free pages
 * in the range will never be allocated. Any free pages, and any pages freed
 * in the future, will not be handed out again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}
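
/*
 * Illustrative sketch (not part of the original file): callers such as
 * alloc_contig_range() and the memory-offline path drive this API roughly
 * as follows. my_migrate_range() is a hypothetical placeholder for the
 * caller's own "migrate or free everything still in use" step, and the
 * migratetype/skip_hwpoisoned_pages values are example choices only.
 */
#if 0
static int example_isolate_and_check(unsigned long start_pfn,
				     unsigned long end_pfn)
{
	int ret;

	/* Mark every pageblock in [start_pfn, end_pfn) MIGRATE_ISOLATE. */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, true);
	if (ret)
		return ret;

	/* Migrate or free whatever is still allocated in the range. */
	ret = my_migrate_range(start_pfn, end_pfn);	/* hypothetical helper */

	/* Verify that the whole range is now free and isolated. */
	if (!ret)
		ret = test_pages_isolated(start_pfn, end_pfn, true);

	/* On failure, restore the original migratetype. */
	if (ret)
		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	return ret;
}
#endif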

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;
	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			/*
			 * If a race between isolation and allocation happens,
			 * some free pages could be on the MIGRATE_MOVABLE list
			 * although the pageblock's migration type is
			 * MIGRATE_ISOLATE. Catch it and move the page onto the
			 * MIGRATE_ISOLATE list.
			 */
			if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
				struct page *end_page;

				end_page = page + (1 << page_order(page)) - 1;
				move_freepages(page_zone(page), page, end_page,
					       MIGRATE_ISOLATE);
			}
			pfn += 1 << page_order(page);
		} else if (page_count(page) == 0 &&
			   get_freepage_migratetype(page) == MIGRATE_ISOLATE)
			pfn += 1;
		else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
			/*
			 * The HWPoisoned page may not be in the buddy
			 * system, and page_count() is not 0.
			 */
			pfn++;
			continue;
		} else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}

int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages.
	 * We therefore only check the pageblock migratetype here and do the
	 * per-page check under zone->lock below.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret ? 0 : -EBUSY;
}

struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

	/*
	 * TODO: allocate a destination hugepage from the nearest neighbor
	 * node, in accordance with the memory policy of the user process if
	 * possible. For now, as a simple work-around, we use the next node
	 * as the destination.
	 */
	if (PageHuge(page)) {
		nodemask_t src = nodemask_of_node(page_to_nid(page));
		nodemask_t dst;
		nodes_complement(dst, src);
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					    next_node(page_to_nid(page), dst));
	}

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	return alloc_page(gfp_mask);
}
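
/*
 * Illustrative sketch (not part of the original file): alloc_migrate_target()
 * is intended as the get_new_page callback of migrate_pages() (declared in
 * <linux/migrate.h>), e.g. when emptying an isolated range before offlining
 * it. The caller below is a hypothetical outline, not the code of any
 * particular user, and the MIGRATE_SYNC/MR_MEMORY_HOTPLUG arguments are
 * example choices only.
 */
#if 0
static int example_migrate_away(struct list_head *source_pages)
{
	/*
	 * Ask migrate_pages() to move each listed page to a target page
	 * obtained from alloc_migrate_target().
	 */
	return migrate_pages(source_pages, alloc_migrate_target,
			     0, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
}
#endif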