/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
        struct list_head freepages;     /* List of free pages to migrate to */
        struct list_head migratepages;  /* List of pages being migrated */
        unsigned long nr_freepages;     /* Number of isolated free pages */
        unsigned long nr_migratepages;  /* Number of pages to migrate */
        unsigned long free_pfn;         /* isolate_freepages search base */
        unsigned long migrate_pfn;      /* isolate_migratepages search base */

        /* Account for isolated anon and file pages */
        unsigned long nr_anon;
        unsigned long nr_file;

        unsigned int order;             /* order a direct compactor needs */
        int migratetype;                /* MOVABLE, RECLAIMABLE etc */
        struct zone *zone;
};
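/*
 * A rough sketch of a run, derived from the comment on compact_control
 * above (illustrative only, not an exact memory map):
 *
 *   zone start                                              zone end
 *   | migrate_pfn -->                         <-- free_pfn  |
 *   | movable pages isolated here   free pages isolated here|
 *
 * The run completes when the two scanners meet.
 */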
/* Return isolated free pages to the buddy allocator, counting them */
static unsigned long release_freepages(struct list_head *freelist)
{
        struct page *page, *next;
        unsigned long count = 0;

        list_for_each_entry_safe(page, next, freelist, lru) {
                list_del(&page->lru);
                __free_page(page);
                count++;
        }

        return count;
}
/* Isolate free pages onto a private freelist. Must hold zone->lock */
static unsigned long isolate_freepages_block(struct zone *zone,
                                unsigned long blockpfn,
                                struct list_head *freelist)
{
        unsigned long zone_end_pfn, end_pfn;
        int total_isolated = 0;
        struct page *cursor;

        /* Get the last PFN we should scan for free pages at */
        zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
        end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);

        /* Find the first usable PFN in the block to initialise the page cursor */
        for (; blockpfn < end_pfn; blockpfn++) {
                if (pfn_valid_within(blockpfn))
                        break;
        }
        cursor = pfn_to_page(blockpfn);

        /* Isolate free pages. This assumes the block is valid */
        for (; blockpfn < end_pfn; blockpfn++, cursor++) {
                int isolated, i;
                struct page *page = cursor;

                if (!pfn_valid_within(blockpfn))
                        continue;

                if (!PageBuddy(page))
                        continue;

                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
                total_isolated += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
                        page++;
                }

                /* If a page was split, advance to the end of it */
                if (isolated) {
                        blockpfn += isolated - 1;
                        cursor += isolated - 1;
                }
        }

        return total_isolated;
}
/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
        int migratetype = get_pageblock_migratetype(page);

        /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
        if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
                return false;

        /* If the page is a large free page, then allow migration */
        if (PageBuddy(page) && page_order(page) >= pageblock_order)
                return true;

        /* If the block is MIGRATE_MOVABLE, allow migration */
        if (migratetype == MIGRATE_MOVABLE)
                return true;

        /* Otherwise skip the block */
        return false;
}
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
                                struct compact_control *cc)
{
        struct page *page;
        unsigned long high_pfn, low_pfn, pfn;
        unsigned long flags;
        int nr_freepages = cc->nr_freepages;
        struct list_head *freelist = &cc->freepages;

        pfn = cc->free_pfn;
        low_pfn = cc->migrate_pfn + pageblock_nr_pages;
        high_pfn = low_pfn;

        /*
         * Isolate free pages until enough are available to migrate the
         * pages on cc->migratepages. We stop searching if the migrate
         * and free page scanners meet or enough free pages are isolated.
         */
        spin_lock_irqsave(&zone->lock, flags);
        for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;

                if (!pfn_valid(pfn))
                        continue;

                /*
                 * Check for overlapping nodes/zones. It's possible on some
                 * configurations to have a setup like
                 * node0 node1 node0
                 * i.e. it's possible that all pages within a zone's range of
                 * pages do not belong to a single zone.
                 */
                page = pfn_to_page(pfn);
                if (page_zone(page) != zone)
                        continue;

                /* Check the block is suitable for migration */
                if (!suitable_migration_target(page))
                        continue;

                /* Found a block suitable for isolating free pages from */
                isolated = isolate_freepages_block(zone, pfn, freelist);
                nr_freepages += isolated;

                /*
                 * Record the highest PFN we isolated pages from. When next
                 * looking for free pages, the search will restart here as
                 * page migration may have returned some pages to the allocator
                 */
                if (isolated)
                        high_pfn = max(high_pfn, pfn);
        }
        spin_unlock_irqrestore(&zone->lock, flags);

        /* split_free_page does not map the pages */
        list_for_each_entry(page, freelist, lru) {
                arch_alloc_page(page, 0);
                kernel_map_pages(page, 1, 1);
        }

        cc->free_pfn = high_pfn;
        cc->nr_freepages = nr_freepages;
}
/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
        struct page *page;
        unsigned int count[NR_LRU_LISTS] = { 0, };

        list_for_each_entry(page, &cc->migratepages, lru) {
                int lru = page_lru_base_type(page);
                count[lru]++;
        }

        cc->nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
        cc->nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
        __mod_zone_page_state(zone, NR_ISOLATED_ANON, cc->nr_anon);
        __mod_zone_page_state(zone, NR_ISOLATED_FILE, cc->nr_file);
}
/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
        unsigned long inactive, isolated;

        inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
                                zone_page_state(zone, NR_INACTIVE_ANON);
        isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
                                zone_page_state(zone, NR_ISOLATED_ANON);

        return isolated > inactive;
}
/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static unsigned long isolate_migratepages(struct zone *zone,
                                        struct compact_control *cc)
{
        unsigned long low_pfn, end_pfn;
        struct list_head *migratelist = &cc->migratepages;

        /* Do not scan outside zone boundaries */
        low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

        /* Only scan within a pageblock boundary */
        end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

        /* Do not cross the free scanner or scan within a memory hole */
        if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
                cc->migrate_pfn = end_pfn;
                return 0;
        }

        /*
         * Ensure that there are not too many pages isolated from the LRU
         * list by either parallel reclaimers or compaction. If there are,
         * delay for some time until fewer pages are isolated
         */
        while (unlikely(too_many_isolated(zone))) {
                congestion_wait(BLK_RW_ASYNC, HZ/10);

                if (fatal_signal_pending(current))
                        return 0;
        }

        /* Time to isolate some pages for migration */
        spin_lock_irq(&zone->lru_lock);
        for (; low_pfn < end_pfn; low_pfn++) {
                struct page *page;

                if (!pfn_valid_within(low_pfn))
                        continue;

                /* Get the page and skip it if free */
                page = pfn_to_page(low_pfn);
                if (PageBuddy(page))
                        continue;

                /* Try to isolate the page */
                if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
                        continue;

                /* Successfully isolated */
                del_page_from_lru_list(zone, page, page_lru(page));
                list_add(&page->lru, migratelist);
                mem_cgroup_del_lru(page);
                cc->nr_migratepages++;

                /* Avoid isolating too much */
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
                        break;
        }

        acct_isolated(zone, cc);

        spin_unlock_irq(&zone->lru_lock);
        cc->migrate_pfn = low_pfn;

        return cc->nr_migratepages;
}
/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
                                        unsigned long data,
                                        int **result)
{
        struct compact_control *cc = (struct compact_control *)data;
        struct page *freepage;

        /* Isolate free pages if necessary */
        if (list_empty(&cc->freepages)) {
                isolate_freepages(cc->zone, cc);

                if (list_empty(&cc->freepages))
                        return NULL;
        }

        freepage = list_entry(cc->freepages.next, struct page, lru);
        list_del(&freepage->lru);
        cc->nr_freepages--;

        return freepage;
}
/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
        int nr_migratepages = 0;
        int nr_freepages = 0;
        struct page *page;

        list_for_each_entry(page, &cc->migratepages, lru)
                nr_migratepages++;
        list_for_each_entry(page, &cc->freepages, lru)
                nr_freepages++;

        cc->nr_migratepages = nr_migratepages;
        cc->nr_freepages = nr_freepages;
}
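/*
 * Decide whether this compaction run can stop. Summarising the checks
 * below: COMPACT_COMPLETE when the free and migrate scanners have met,
 * COMPACT_PARTIAL when a fatal signal is pending or a page of the
 * requested order is already free, COMPACT_CONTINUE otherwise.
 */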
static int compact_finished(struct zone *zone,
                            struct compact_control *cc)
{
        unsigned int order;
        unsigned long watermark = low_wmark_pages(zone) + (1 << cc->order);

        if (fatal_signal_pending(current))
                return COMPACT_PARTIAL;

        /* Compaction run completes if the migrate and free scanner meet */
        if (cc->free_pfn <= cc->migrate_pfn)
                return COMPACT_COMPLETE;

        /* Compaction run is not finished if the watermark is not met */
        if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
                return COMPACT_CONTINUE;

        /* order == -1 is a full-zone request (see compact_node); never stop early */
        if (cc->order == -1)
                return COMPACT_CONTINUE;

        /* Direct compactor: Is a suitable page free? */
        for (order = cc->order; order < MAX_ORDER; order++) {
                /* Job done if page is free of the right migratetype */
                if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
                        return COMPACT_PARTIAL;

                /* Job done if allocation would set block type */
                if (order >= pageblock_order && zone->free_area[order].nr_free)
                        return COMPACT_PARTIAL;
        }

        return COMPACT_CONTINUE;
}
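/*
 * Compact a single zone: repeatedly isolate a cluster of movable pages,
 * hand them to migrate_pages() with compaction_alloc() supplying the
 * target pages, put back anything that failed to move, and stop once
 * compact_finished() says the run is done.
 */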
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
        int ret;

        /* Setup to move all movable pages to the end of the zone */
        cc->migrate_pfn = zone->zone_start_pfn;
        cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
        cc->free_pfn &= ~(pageblock_nr_pages-1);

        migrate_prep_local();

        while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
                unsigned long nr_migrate, nr_remaining;

                if (!isolate_migratepages(zone, cc))
                        continue;

                nr_migrate = cc->nr_migratepages;
                migrate_pages(&cc->migratepages, compaction_alloc,
                                                (unsigned long)cc, 0);
                update_nr_listpages(cc);
                nr_remaining = cc->nr_migratepages;

                count_vm_event(COMPACTBLOCKS);
                count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
                if (nr_remaining)
                        count_vm_events(COMPACTPAGEFAILED, nr_remaining);

                /* Release LRU pages not migrated */
                if (!list_empty(&cc->migratepages)) {
                        putback_lru_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                }
        }

        /* Release free pages and check accounting */
        cc->nr_freepages -= release_freepages(&cc->freepages);
        VM_BUG_ON(cc->nr_freepages != 0);

        return ret;
}
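/* Build a compact_control for one direct-compaction request and run it */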
static unsigned long compact_zone_order(struct zone *zone,
                                        int order, gfp_t gfp_mask)
{
        struct compact_control cc = {
                .nr_freepages = 0,
                .nr_migratepages = 0,
                .order = order,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
        };
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);

        return compact_zone(zone, &cc);
}
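/*
 * Zones whose fragmentation index is at or below this threshold are not
 * compacted, on the basis that the allocation failure is due to a lack
 * of memory rather than external fragmentation (see the fragindex check
 * in try_to_compact_pages() below). Presumably exposed as
 * /proc/sys/vm/extfrag_threshold; the ctl_table itself is registered
 * outside this file.
 */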
int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        int order, gfp_t gfp_mask, nodemask_t *nodemask)
{
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        int may_enter_fs = gfp_mask & __GFP_FS;
        int may_perform_io = gfp_mask & __GFP_IO;
        unsigned long watermark;
        struct zoneref *z;
        struct zone *zone;
        int rc = COMPACT_SKIPPED;

        /*
         * Check whether it is worth even starting compaction. The order check
         * is made because the page allocator is assumed to be able to satisfy
         * the "cheaper" orders without taking special steps.
         */
        if (order <= PAGE_ALLOC_COSTLY_ORDER || !may_enter_fs || !may_perform_io)
                return rc;

        count_vm_event(COMPACTSTALL);

        /* Compact each zone in the list */
        for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
                                                                nodemask) {
                int fragindex;
                int status;

                /*
                 * Watermarks for order-0 must be met for compaction. Note
                 * the 2UL. This is because during migration, copies of
                 * pages need to be allocated and for a short time, the
                 * footprint is higher
                 */
                watermark = low_wmark_pages(zone) + (2UL << order);
                if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
                        continue;

                /*
                 * fragmentation index determines if allocation failures are
                 * due to low memory or external fragmentation
                 *
                 * index of -1 implies allocations might succeed depending
                 *      on watermarks
                 * index towards 0 implies failure is due to lack of memory
                 * index towards 1000 implies failure is due to fragmentation
                 *
                 * Only compact if a failure would be due to fragmentation.
                 */
                fragindex = fragmentation_index(zone, order);
                if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
                        continue;

                if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0)) {
                        rc = COMPACT_PARTIAL;
                        break;
                }

                status = compact_zone_order(zone, order, gfp_mask);
                rc = max(status, rc);

                /* If a high-order allocation would now succeed, stop compacting */
                if (zone_watermark_ok(zone, order, watermark, 0, 0))
                        break;
        }

        return rc;
}
/* Compact all zones within a node */
static int compact_node(int nid)
{
        int zoneid;
        pg_data_t *pgdat;
        struct zone *zone;

        if (nid < 0 || nid >= nr_node_ids || !node_online(nid))
                return -EINVAL;
        pgdat = NODE_DATA(nid);

        /* Flush pending updates to the LRU lists */
        lru_add_drain_all();

        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
                struct compact_control cc = {
                        .nr_freepages = 0,
                        .nr_migratepages = 0,
                        .order = -1,
                };

                zone = &pgdat->node_zones[zoneid];
                if (!populated_zone(zone))
                        continue;

                cc.zone = zone;
                INIT_LIST_HEAD(&cc.freepages);
                INIT_LIST_HEAD(&cc.migratepages);

                compact_zone(zone, &cc);

                VM_BUG_ON(!list_empty(&cc.freepages));
                VM_BUG_ON(!list_empty(&cc.migratepages));
        }

        return 0;
}
/* Compact all nodes in the system */
static int compact_nodes(void)
{
        int nid;

        for_each_online_node(nid)
                compact_node(nid);

        return COMPACT_COMPLETE;
}
565 int sysctl_compact_memory;
567 /* This is the entry point for compacting all nodes via /proc/sys/vm */
568 int sysctl_compaction_handler(struct ctl_table *table, int write,
569 void __user *buffer, size_t *length, loff_t *ppos)
571 if (write)
572 return compact_nodes();
574 return 0;
int sysctl_extfrag_handler(struct ctl_table *table, int write,
                        void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_minmax(table, write, buffer, length, ppos);

        return 0;
}
#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct sys_device *dev,
                        struct sysdev_attribute *attr,
                        const char *buf, size_t count)
{
        compact_node(dev->id);

        return count;
}
static SYSDEV_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
        return sysdev_create_file(&node->sysdev, &attr_compact);
}

void compaction_unregister_node(struct node *node)
{
        return sysdev_remove_file(&node->sysdev, &attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */