/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */
#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page().
 */
int migrate_prep(void)
{
        /*
         * Clear the LRU lists so pages can be isolated.
         * Note that pages may be moved off the LRU after we have
         * drained them. Those pages will fail to migrate like other
         * pages that may be busy.
         */
        lru_add_drain_all();

        return 0;
}

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto the unevictable list.
 *
 * Returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
        struct page *page;
        struct page *page2;
        int count = 0;

        list_for_each_entry_safe(page, page2, l, lru) {
                list_del(&page->lru);
                putback_lru_page(page);
                count++;
        }
        return count;
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static void remove_migration_pte(struct vm_area_struct *vma,
                struct page *old, struct page *new)
{
        struct mm_struct *mm = vma->vm_mm;
        swp_entry_t entry;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;
        spinlock_t *ptl;
        unsigned long addr = page_address_in_vma(new, vma);

        if (addr == -EFAULT)
                return;

        pgd = pgd_offset(mm, addr);
        if (!pgd_present(*pgd))
                return;

        pud = pud_offset(pgd, addr);
        if (!pud_present(*pud))
                return;

        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                return;

        ptep = pte_offset_map(pmd, addr);

        if (!is_swap_pte(*ptep)) {
                pte_unmap(ptep);
                return;
        }

        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);

        if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
                goto out;

        /*
         * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
         * Failure is not an option here: we're now expected to remove every
         * migration pte, and will cause crashes otherwise. Normally this
         * is not an issue: mem_cgroup_prepare_migration bumped up the old
         * page_cgroup count for safety, that's now attached to the new page,
         * so this charge should just be another incrementation of the count,
         * to keep in balance with rmap.c's mem_cgroup_uncharging. But if
         * there's been a force_empty, those reference counts may no longer
         * be reliable, and this charge can actually fail: oh well, we don't
         * make the situation any worse by proceeding as if it had succeeded.
         */
        mem_cgroup_charge(new, mm, GFP_ATOMIC);

        get_page(new);
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
        if (is_write_migration_entry(entry))
                pte = pte_mkwrite(pte);
        flush_cache_page(vma, addr, pte_pfn(pte));
        set_pte_at(mm, addr, ptep, pte);

        if (PageAnon(new))
                page_add_anon_rmap(new, vma, addr);
        else
                page_add_file_rmap(new);

        /* No need to invalidate - it was non-present before */
        update_mmu_cache(vma, addr, pte);

out:
        pte_unmap_unlock(ptep, ptl);
}

/*
 * Note that remove_file_migration_ptes will only work on regular mappings;
 * nonlinear mappings do not use migration entries.
 */
static void remove_file_migration_ptes(struct page *old, struct page *new)
{
        struct vm_area_struct *vma;
        struct address_space *mapping = page_mapping(new);
        struct prio_tree_iter iter;
        pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        if (!mapping)
                return;

        spin_lock(&mapping->i_mmap_lock);

        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
                remove_migration_pte(vma, old, new);

        spin_unlock(&mapping->i_mmap_lock);
}

/*
 * Must hold mmap_sem lock on at least one of the vmas containing
 * the page so that the anon_vma cannot vanish.
 */
static void remove_anon_migration_ptes(struct page *old, struct page *new)
{
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        unsigned long mapping;

        mapping = (unsigned long)new->mapping;

        if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
                return;

        /*
         * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
         */
        anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
        spin_lock(&anon_vma->lock);

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
                remove_migration_pte(vma, old, new);

        spin_unlock(&anon_vma->lock);
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
        if (PageAnon(new))
                remove_anon_migration_ptes(old, new);
        else
                remove_file_migration_ptes(old, new);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                unsigned long address)
{
        pte_t *ptep, pte;
        spinlock_t *ptl;
        swp_entry_t entry;
        struct page *page;

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry))
                goto out;

        page = migration_entry_to_page(entry);

        /*
         * Once radix-tree replacement for page migration has started,
         * page_count *must* be zero, and we don't want to call
         * wait_on_page_locked() against a page without holding a reference.
         * So we use get_page_unless_zero() here. Even if it fails, the
         * page fault will simply occur again.
         */
        if (!get_page_unless_zero(page))
                goto out;
        pte_unmap_unlock(ptep, ptl);
        wait_on_page_locked(page);
        put_page(page);
        return;
out:
        pte_unmap_unlock(ptep, ptl);
}
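
/*
 * Illustrative sketch (not part of this file): as the comment above notes,
 * migration_entry_wait() is called from do_swap_page(). Roughly, the fault
 * path recognises a migration entry and waits for migration to finish before
 * the fault is retried; the snippet below is a simplified paraphrase, not the
 * exact upstream code:
 *
 *        entry = pte_to_swp_entry(orig_pte);
 *        if (is_migration_entry(entry)) {
 *                migration_entry_wait(mm, pmd, address);
 *                goto out;        (fault returns and is retried later)
 *        }
 */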

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        int expected_count;
        void **pslot;

        if (!mapping) {
                /* Anonymous page without mapping */
                if (page_count(page) != 1)
                        return -EAGAIN;
                return 0;
        }

        spin_lock_irq(&mapping->tree_lock);

        pslot = radix_tree_lookup_slot(&mapping->page_tree,
                                        page_index(page));

        expected_count = 2 + !!PagePrivate(page);
        if (page_count(page) != expected_count ||
                        (struct page *)radix_tree_deref_slot(pslot) != page) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        if (!page_freeze_refs(page, expected_count)) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        /*
         * Now we know that no one else is looking at the page.
         */
        get_page(newpage);        /* add cache reference */
#ifdef CONFIG_SWAP
        if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
                set_page_private(newpage, page_private(page));
        }
#endif

        radix_tree_replace_slot(pslot, newpage);

        page_unfreeze_refs(page, expected_count);
        /*
         * Drop cache reference from old page.
         * We know this isn't the last reference.
         */
        __put_page(page);

        /*
         * If moved to a different zone then also account
         * the page for that zone. Other VM counters will be
         * taken care of when we establish references to the
         * new page and drop references to the old page.
         *
         * Note that anonymous pages are accounted for
         * via NR_FILE_PAGES and NR_ANON_PAGES if they
         * are mapped to swap space.
         */
        __dec_zone_page_state(page, NR_FILE_PAGES);
        __inc_zone_page_state(newpage, NR_FILE_PAGES);

        spin_unlock_irq(&mapping->tree_lock);

        return 0;
}

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
        int anon;

        copy_highpage(newpage, page);

        if (PageError(page))
                SetPageError(newpage);
        if (PageReferenced(page))
                SetPageReferenced(newpage);
        if (PageUptodate(page))
                SetPageUptodate(newpage);
        if (TestClearPageActive(page)) {
                VM_BUG_ON(PageUnevictable(page));
                SetPageActive(newpage);
        } else
                unevictable_migrate_page(newpage, page);
        if (PageChecked(page))
                SetPageChecked(newpage);
        if (PageMappedToDisk(page))
                SetPageMappedToDisk(newpage);

        if (PageDirty(page)) {
                clear_page_dirty_for_io(page);
                /*
                 * Want to mark the page and the radix tree as dirty, and
                 * redo the accounting that clear_page_dirty_for_io undid,
                 * but we can't use set_page_dirty because that function
                 * is actually a signal that all of the page has become dirty.
                 * Whereas only part of our page may be dirty.
                 */
                __set_page_dirty_nobuffers(newpage);
        }

        mlock_migrate_page(newpage, page);

#ifdef CONFIG_SWAP
        ClearPageSwapCache(page);
#endif
        ClearPagePrivate(page);
        set_page_private(page, 0);
        /* page->mapping contains a flag for PageAnon() */
        anon = PageAnon(page);
        page->mapping = NULL;

        if (!anon) /* This page was removed from radix-tree. */
                mem_cgroup_uncharge_cache_page(page);

        /*
         * If any waiters have accumulated on the new page then
         * wake them up.
         */
        if (PageWriteback(newpage))
                end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
                        struct page *newpage, struct page *page)
{
        return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        int rc;

        BUG_ON(PageWriteback(page));        /* Writeback must be complete */

        rc = migrate_page_move_mapping(mapping, newpage, page);

        if (rc)
                return rc;

        migrate_page_copy(newpage, page);
        return 0;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        struct buffer_head *bh, *head;
        int rc;

        if (!page_has_buffers(page))
                return migrate_page(mapping, newpage, page);

        head = page_buffers(page);

        rc = migrate_page_move_mapping(mapping, newpage, page);

        if (rc)
                return rc;

        bh = head;
        do {
                get_bh(bh);
                lock_buffer(bh);
                bh = bh->b_this_page;
        } while (bh != head);

        ClearPagePrivate(page);
        set_page_private(newpage, page_private(page));
        set_page_private(page, 0);
        put_page(page);
        get_page(newpage);

        bh = head;
        do {
                set_bh_page(bh, newpage, bh_offset(bh));
                bh = bh->b_this_page;
        } while (bh != head);

        SetPagePrivate(newpage);

        migrate_page_copy(newpage, page);

        bh = head;
        do {
                unlock_buffer(bh);
                put_bh(bh);
                bh = bh->b_this_page;
        } while (bh != head);

        return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif
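
/*
 * Illustrative sketch (not part of this file): filesystems opt in to page
 * migration by wiring one of the helpers above into their
 * address_space_operations. Field names below follow this kernel era and
 * the handlers are hypothetical examples:
 *
 *        static const struct address_space_operations example_aops = {
 *                .writepage      = example_writepage,
 *                .migratepage    = buffer_migrate_page,   (or migrate_page)
 *        };
 *
 * Mappings that must never be migrated can point .migratepage at
 * fail_migrate_page instead.
 */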

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = 1,
                .range_start = 0,
                .range_end = LLONG_MAX,
                .nonblocking = 1,
                .for_reclaim = 1
        };
        int rc;

        if (!mapping->a_ops->writepage)
                /* No write method for the address space */
                return -EINVAL;

        if (!clear_page_dirty_for_io(page))
                /* Someone else already triggered a write */
                return -EAGAIN;

        /*
         * A dirty page may imply that the underlying filesystem has
         * the page on some queue. So the page must be clean for
         * migration. Writeout may mean we lose the lock and the
         * page state is no longer what we checked for earlier.
         * At this point we know that the migration attempt cannot
         * be successful.
         */
        remove_migration_ptes(page, page);

        rc = mapping->a_ops->writepage(page, &wbc);

        if (rc != AOP_WRITEPAGE_ACTIVATE)
                /* unlocked. Relock */
                lock_page(page);

        return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
        struct page *newpage, struct page *page)
{
        if (PageDirty(page))
                return writeout(mapping, page);

        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (PagePrivate(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;

        return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  == 0 - success
 */
static int move_to_new_page(struct page *newpage, struct page *page)
{
        struct address_space *mapping;
        int rc;

        /*
         * Block others from accessing the page when we get around to
         * establishing additional references. We are the only one
         * holding a reference to the new page at this point.
         */
        if (!trylock_page(newpage))
                BUG();

        /* Prepare mapping for the new page.*/
        newpage->index = page->index;
        newpage->mapping = page->mapping;
        if (PageSwapBacked(page))
                SetPageSwapBacked(newpage);

        mapping = page_mapping(page);
        if (!mapping)
                rc = migrate_page(mapping, newpage, page);
        else if (mapping->a_ops->migratepage)
                /*
                 * Most pages have a mapping and most filesystems
                 * should provide a migration function. Anonymous
                 * pages are part of swap space which also has its
                 * own migration function. This is the most common
                 * path for page migration.
                 */
                rc = mapping->a_ops->migratepage(mapping,
                                                newpage, page);
        else
                rc = fallback_migrate_page(mapping, newpage, page);

        if (!rc) {
                remove_migration_ptes(page, newpage);
        } else
                newpage->mapping = NULL;

        unlock_page(newpage);

        return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                        struct page *page, int force)
{
        int rc = 0;
        int *result = NULL;
        struct page *newpage = get_new_page(page, private, &result);
        int rcu_locked = 0;
        int charge = 0;

        if (!newpage)
                return -ENOMEM;

        if (page_count(page) == 1) {
                /* page was freed from under us. So we are done. */
                goto move_newpage;
        }

        charge = mem_cgroup_prepare_migration(page, newpage);
        if (charge == -ENOMEM) {
                rc = -ENOMEM;
                goto move_newpage;
        }
        /* prepare cgroup just returns 0 or -ENOMEM */
        BUG_ON(charge);

        rc = -EAGAIN;
        if (!trylock_page(page)) {
                if (!force)
                        goto move_newpage;
                lock_page(page);
        }

        if (PageWriteback(page)) {
                if (!force)
                        goto unlock;
                wait_on_page_writeback(page);
        }
        /*
         * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
         * we cannot notice that the anon_vma is freed while we migrate a page.
         * This rcu_read_lock() delays freeing the anon_vma pointer until the
         * end of migration. File cache pages are no problem because of
         * page_lock(); file caches may use write_page() or lock_page() during
         * migration, so only anon pages need this care here.
         */
        if (PageAnon(page)) {
                rcu_read_lock();
                rcu_locked = 1;
        }

        /*
         * Corner case handling:
         * 1. When a new swap-cache page is read into, it is added to the LRU
         * and treated as swapcache but it has no rmap yet.
         * Calling try_to_unmap() against a page->mapping==NULL page will
         * trigger a BUG. So handle it here.
         * 2. An orphaned page (see truncate_complete_page) might have
         * fs-private metadata. The page can be picked up due to memory
         * offlining. Everywhere else except page reclaim, the page is
         * invisible to the vm, so the page can not be migrated. So try to
         * free the metadata, so the page can be freed.
         */
        if (!page->mapping) {
                if (!PageAnon(page) && PagePrivate(page)) {
                        /*
                         * Go direct to try_to_free_buffers() here because
                         * a) that's what try_to_release_page() would do anyway
                         * b) we may be under rcu_read_lock() here, so we can't
                         *    use GFP_KERNEL which is what try_to_release_page()
                         *    needs to be effective.
                         */
                        try_to_free_buffers(page);
                }
                goto rcu_unlock;
        }

        /* Establish migration ptes or remove ptes */
        try_to_unmap(page, 1);

        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page);

        if (rc)
                remove_migration_ptes(page, page);
rcu_unlock:
        if (rcu_locked)
                rcu_read_unlock();

unlock:
        unlock_page(page);

        if (rc != -EAGAIN) {
                /*
                 * A page that has been migrated has all references
                 * removed and will be freed. A page that has not been
                 * migrated will have kept its references and be
                 * restored.
                 */
                list_del(&page->lru);
                putback_lru_page(page);
        }

move_newpage:
        if (!charge)
                mem_cgroup_end_migration(newpage);

        /*
         * Move the new page to the LRU. If migration was not successful
         * then this will free the page.
         */
        putback_lru_page(newpage);

        if (result) {
                if (rc)
                        *result = rc;
                else
                        *result = page_to_nid(newpage);
        }
        return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that determines from the page to be migrated and the private data
 * the target of the move and allocates the page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore. All pages will be
 * returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
                new_page_t get_new_page, unsigned long private)
{
        int retry = 1;
        int nr_failed = 0;
        int pass = 0;
        struct page *page;
        struct page *page2;
        int swapwrite = current->flags & PF_SWAPWRITE;
        int rc;

        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;

        for (pass = 0; pass < 10 && retry; pass++) {
                retry = 0;

                list_for_each_entry_safe(page, page2, from, lru) {
                        cond_resched();

                        rc = unmap_and_move(get_new_page, private,
                                                page, pass > 2);

                        switch (rc) {
                        case -ENOMEM:
                                goto out;
                        case -EAGAIN:
                                retry++;
                                break;
                        case 0:
                                break;
                        default:
                                /* Permanent failure */
                                nr_failed++;
                                break;
                        }
                }
        }
        rc = 0;
out:
        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;

        putback_lru_pages(from);

        if (rc)
                return rc;

        return nr_failed + retry;
}
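
/*
 * Illustrative sketch (not part of this file): a caller typically isolates
 * the pages it wants to move onto a private list and supplies a new_page_t
 * allocator; new_page_node() below is the real callback used by the
 * move_pages() syscall path. Simplified, with hypothetical names:
 *
 *        static struct page *alloc_on_node(struct page *page,
 *                                          unsigned long node, int **result)
 *        {
 *                return alloc_pages_node((int)node, GFP_HIGHUSER_MOVABLE, 0);
 *        }
 *
 *        (pagelist filled via isolate_lru_page() after migrate_prep())
 *        err = migrate_pages(&pagelist, alloc_on_node, target_node);
 */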

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
        unsigned long addr;
        struct page *page;
        int node;
        int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
                int **result)
{
        struct page_to_node *pm = (struct page_to_node *)private;

        while (pm->node != MAX_NUMNODES && pm->page != p)
                pm++;

        if (pm->node == MAX_NUMNODES)
                return NULL;

        *result = &pm->status;

        return alloc_pages_node(pm->node,
                                GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
                                      struct page_to_node *pm,
                                      int migrate_all)
{
        int err;
        struct page_to_node *pp;
        LIST_HEAD(pagelist);

        migrate_prep();
        down_read(&mm->mmap_sem);

        /*
         * Build a list of pages to migrate
         */
        for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
                struct vm_area_struct *vma;
                struct page *page;

                /*
                 * A valid page pointer that will not match any of the
                 * pages that will be moved.
                 */
                pp->page = ZERO_PAGE(0);

                err = -EFAULT;
                vma = find_vma(mm, pp->addr);
                if (!vma || !vma_migratable(vma))
                        goto set_status;

                page = follow_page(vma, pp->addr, FOLL_GET);

                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto set_status;

                err = -ENOENT;
                if (!page)
                        goto set_status;

                if (PageReserved(page))         /* Check for zero page */
                        goto put_and_set;

                pp->page = page;
                err = page_to_nid(page);

                if (err == pp->node)
                        /*
                         * Node already in the right place
                         */
                        goto put_and_set;

                err = -EACCES;
                if (page_mapcount(page) > 1 &&
                                !migrate_all)
                        goto put_and_set;

                err = isolate_lru_page(page);
                if (!err)
                        list_add_tail(&page->lru, &pagelist);
put_and_set:
                /*
                 * Either remove the duplicate refcount from
                 * isolate_lru_page() or drop the page ref if it was
                 * not isolated.
                 */
                put_page(page);
set_status:
                pp->status = err;
        }

        err = 0;
        if (!list_empty(&pagelist))
                err = migrate_pages(&pagelist, new_page_node,
                                (unsigned long)pm);

        up_read(&mm->mmap_sem);
        return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill in
 * the corresponding array of status values.
 */
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
                         unsigned long nr_pages,
                         const void __user * __user *pages,
                         const int __user *nodes,
                         int __user *status, int flags)
{
        struct page_to_node *pm = NULL;
        nodemask_t task_nodes;
        int err = 0;
        int i;

        task_nodes = cpuset_mems_allowed(task);

        /* Limit nr_pages so that the multiplication may not overflow */
        if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
                err = -E2BIG;
                goto out;
        }

        pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
        if (!pm) {
                err = -ENOMEM;
                goto out;
        }

        /*
         * Get parameters from user space and initialize the pm
         * array. Return various errors if the user did something wrong.
         */
        for (i = 0; i < nr_pages; i++) {
                const void __user *p;

                err = -EFAULT;
                if (get_user(p, pages + i))
                        goto out_pm;

                pm[i].addr = (unsigned long)p;
                if (nodes) {
                        int node;

                        if (get_user(node, nodes + i))
                                goto out_pm;

                        err = -ENODEV;
                        if (!node_state(node, N_HIGH_MEMORY))
                                goto out_pm;

                        err = -EACCES;
                        if (!node_isset(node, task_nodes))
                                goto out_pm;

                        pm[i].node = node;
                } else
                        pm[i].node = 0; /* anything to not match MAX_NUMNODES */
        }
        /* End marker */
        pm[nr_pages].node = MAX_NUMNODES;

        err = do_move_page_to_node_array(mm, pm, flags & MPOL_MF_MOVE_ALL);
        if (err >= 0)
                /* Return status information */
                for (i = 0; i < nr_pages; i++)
                        if (put_user(pm[i].status, status + i))
                                err = -EFAULT;

out_pm:
        vfree(pm);
out:
        return err;
}

/*
 * Determine the node of each page in an array of pages and store the
 * results in an array of status values.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
                                const void __user **pages, int *status)
{
        unsigned long i;

        down_read(&mm->mmap_sem);

        for (i = 0; i < nr_pages; i++) {
                unsigned long addr = (unsigned long)(*pages);
                struct vm_area_struct *vma;
                struct page *page;
                int err = -EFAULT;

                vma = find_vma(mm, addr);
                if (!vma)
                        goto set_status;

                page = follow_page(vma, addr, 0);

                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto set_status;

                err = -ENOENT;
                /* Use PageReserved to check for zero page */
                if (!page || PageReserved(page))
                        goto set_status;

                err = page_to_nid(page);
set_status:
                *status = err;

                pages++;
                status++;
        }

        up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store them in
 * a user array of status values.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
                         const void __user * __user *pages,
                         int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
        const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
        int chunk_status[DO_PAGES_STAT_CHUNK_NR];
        unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR;
        int err;

        for (i = 0; i < nr_pages; i += chunk_nr) {
                if (chunk_nr + i > nr_pages)
                        chunk_nr = nr_pages - i;

                err = copy_from_user(chunk_pages, &pages[i],
                                     chunk_nr * sizeof(*chunk_pages));
                if (err) {
                        err = -EFAULT;
                        goto out;
                }

                do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

                err = copy_to_user(&status[i], chunk_status,
                                   chunk_nr * sizeof(*chunk_status));
                if (err) {
                        err = -EFAULT;
                        goto out;
                }
        }
        err = 0;

out:
        return err;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
                        const void __user * __user *pages,
                        const int __user *nodes,
                        int __user *status, int flags)
{
        const struct cred *cred = current_cred(), *tcred;
        struct task_struct *task;
        struct mm_struct *mm;
        int err;

        /* Check flags */
        if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
                return -EINVAL;

        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
                return -EPERM;

        /* Find the mm_struct */
        read_lock(&tasklist_lock);
        task = pid ? find_task_by_vpid(pid) : current;
        if (!task) {
                read_unlock(&tasklist_lock);
                return -ESRCH;
        }
        mm = get_task_mm(task);
        read_unlock(&tasklist_lock);

        if (!mm)
                return -EINVAL;

        /*
         * Check if this process has the right to modify the specified
         * process. The right exists if the process has administrative
         * capabilities, superuser privileges or the same
         * userid as the target process.
         */
        rcu_read_lock();
        tcred = __task_cred(task);
        if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
            cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
            !capable(CAP_SYS_NICE)) {
                rcu_read_unlock();
                err = -EPERM;
                goto out;
        }
        rcu_read_unlock();

        err = security_task_movememory(task);
        if (err)
                goto out;

        if (nodes) {
                err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
                                    flags);
        } else {
                err = do_pages_stat(mm, nr_pages, pages, status);
        }

out:
        mmput(mm);
        return err;
}
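
/*
 * Illustrative sketch (not part of this file): from userspace this syscall
 * is normally reached through libnuma's move_pages() wrapper. A minimal,
 * hypothetical example that moves one page of 'buf' to node 1 and reads
 * back its status:
 *
 *        void *pages[1] = { buf };
 *        int nodes[1] = { 1 };
 *        int status[1];
 *
 *        if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
 *                printf("page is now on node %d\n", status[0]);
 */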

/*
 * Call the migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
        const nodemask_t *from, unsigned long flags)
{
        struct vm_area_struct *vma;
        int err = 0;

        for (vma = mm->mmap; vma->vm_next && !err; vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->migrate) {
                        err = vma->vm_ops->migrate(vma, to, from, flags);
                        if (err)
                                break;
                }
        }
        return err;
}
#endif