/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include "internal.h"

/* called while holding zone->lock */
static void set_pageblock_isolate(struct page *page)
{
	if (get_pageblock_migratetype(page) == MIGRATE_ISOLATE)
		return;

	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
	page_zone(page)->nr_pageblock_isolate++;
}

/* called while holding zone->lock */
static void restore_pageblock_isolate(struct page *page, int migratetype)
{
	struct zone *zone = page_zone(page);

	if (WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE))
		return;

	BUG_ON(zone->nr_pageblock_isolate <= 0);
	set_pageblock_migratetype(page, migratetype);
	zone->nr_pageblock_isolate--;
}

int set_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs,
	 * these pages reported as "can be isolated" should be isolated
	 * (freed) by the balloon driver through the memory notifier
	 * chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found))
		ret = 0;

	/*
	 * Here, "immobile" means not-on-LRU pages. If there are more
	 * immobile pages than removable-by-driver pages reported by the
	 * notifier, isolation fails.
	 */

out:
	if (!ret) {
		set_pageblock_isolate(page);
		move_freepages_block(zone, page, MIGRATE_ISOLATE);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages();
	return ret;
}
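
/*
 * Illustrative only, not part of page_isolation.c: a minimal sketch of
 * the driver side of the MEM_ISOLATE_COUNT handshake above. A balloon
 * driver registers on the chain and reports how many pages in the
 * queried range it holds and can release on demand. struct
 * memory_isolate_notify and register_memory_isolate_notifier() are the
 * real interfaces from <linux/memory.h>; balloon_pages_in_range() is a
 * hypothetical helper.
 */
static int example_balloon_isolate_notify(struct notifier_block *nb,
					  unsigned long action, void *data)
{
	struct memory_isolate_notify *arg = data;

	if (action == MEM_ISOLATE_COUNT)
		/* Count balloon-held pages in [start_pfn, start_pfn + nr_pages). */
		arg->pages_found += balloon_pages_in_range(arg->start_pfn,
							   arg->nr_pages);
	return NOTIFY_OK;
}

static struct notifier_block example_balloon_isolate_nb = {
	.notifier_call = example_balloon_isolate_notify,
};
/* Registered at init: register_memory_isolate_notifier(&example_balloon_isolate_nb); */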
void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
	move_freepages_block(zone, page, migratetype);
	restore_pageblock_isolate(page, migratetype);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- make the page-allocation type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 * @migratetype:	migrate type to set in error recovery.
 *
 * Making the page-allocation type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in
 * the future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be
 * isolated. (An illustrative caller sketch appears at the end of this
 * file.)
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && set_migratetype_isolate(page)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}
/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}

/*
 * Test whether all pages in the range are free (which here means
 * isolated). All pages in [start_pfn...end_pfn) must be in the same
 * zone. zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/* A free buddy page: skip its whole order at once. */
			pfn += 1 << page_order(page);
		else if (page_count(page) == 0 &&
			 page_private(page) == MIGRATE_ISOLATE)
			pfn += 1;
		else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}
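
/*
 * Illustrative walk of the loop above (not in the original): with
 * pageblock_order == 9, a free order-9 buddy at the start of a 512-page
 * block advances the scan past all 512 pfns in one step; a single
 * freed-but-isolated page (page_count() == 0, page_private() ==
 * MIGRATE_ISOLATE) advances it by one; any other page breaks out, and
 * the function reports 0.
 */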
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so a chunk of free
	 * pages is not necessarily aligned to pageblock_nr_pages.
	 * Check the pageblock migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check that all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret ? 0 : -EBUSY;
}
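
/*
 * Illustrative caller sketch, not part of this file: the typical
 * memory-offline pattern built from the entry points above. The
 * migration of in-use pages is elided; see do_migrate_range() in
 * mm/memory_hotplug.c for the real path, which also frees the isolated
 * pages on success instead of undoing the isolation.
 */
static int example_offline_range(unsigned long start_pfn,
				 unsigned long end_pfn)
{
	int ret;

	/* Flag each pageblock MIGRATE_ISOLATE; no new allocations land here. */
	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	if (ret)
		return ret;

	/* ...migrate or reclaim the pages still in use in the range... */

	/* Returns 0 only once every page in the range is free and isolated. */
	ret = test_pages_isolated(start_pfn, end_pfn);

	/* In this sketch, always hand the range back to the allocator. */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	return ret;
}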