/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case node -1 here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non-default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
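/*
 * Illustrative userspace sketch (not part of this file; uses the numaif.h
 * userspace API, node numbers are examples only): how a process could ask
 * for the policies described above via the set_mempolicy() and mbind()
 * system calls implemented below.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes = 0x3;		// nodes 0 and 1
 *	// interleave this process' future allocations over nodes 0-1
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);
 *
 *	unsigned long node0 = 0x1;		// node 0 only
 *	// bind an existing mapping to node 0, moving misplaced pages
 *	mbind(addr, length, MPOL_BIND, &node0, sizeof(node0) * 8, MPOL_MF_MOVE);
 */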
/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grow the number of policies supported
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */
static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;
/*
 * run-time system-wide default policy => local allocation
 */
struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};
static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];
/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	int nd, k;

	/* Check that there is something useful in this mask */
	k = policy_zone;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}
static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
}
static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;

	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}
static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}
static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}
/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_HIGH_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}
/*
 * This function just creates a new policy, does some checking and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;	/* simply delete any existing policy */
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);

	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}
/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}
static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}
static void mpol_rebind_nodemask(struct mempolicy *pol,
				 const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	pol->v.nodes = tmp;
	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}
static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}
/* Migrate a policy to a different set of nodes */
static void mpol_rebind_policy(struct mempolicy *pol,
			       const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;
	mpol_ops[pol->mode].rebind(pol, newmask);
}
/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}
/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}
static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};
static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);
/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If the PageReserved would not be checked here then f.e.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}
442 static inline int check_pmd_range(struct vm_area_struct
*vma
, pud_t
*pud
,
443 unsigned long addr
, unsigned long end
,
444 const nodemask_t
*nodes
, unsigned long flags
,
450 pmd
= pmd_offset(pud
, addr
);
452 next
= pmd_addr_end(addr
, end
);
453 if (pmd_none_or_clear_bad(pmd
))
455 if (check_pte_range(vma
, pmd
, addr
, next
, nodes
,
458 } while (pmd
++, addr
= next
, addr
!= end
);
462 static inline int check_pud_range(struct vm_area_struct
*vma
, pgd_t
*pgd
,
463 unsigned long addr
, unsigned long end
,
464 const nodemask_t
*nodes
, unsigned long flags
,
470 pud
= pud_offset(pgd
, addr
);
472 next
= pud_addr_end(addr
, end
);
473 if (pud_none_or_clear_bad(pud
))
475 if (check_pmd_range(vma
, pud
, addr
, next
, nodes
,
478 } while (pud
++, addr
= next
, addr
!= end
);
482 static inline int check_pgd_range(struct vm_area_struct
*vma
,
483 unsigned long addr
, unsigned long end
,
484 const nodemask_t
*nodes
, unsigned long flags
,
490 pgd
= pgd_offset(vma
->vm_mm
, addr
);
492 next
= pgd_addr_end(addr
, end
);
493 if (pgd_none_or_clear_bad(pgd
))
495 if (check_pud_range(vma
, pgd
, addr
, next
, nodes
,
498 } while (pgd
++, addr
= next
, addr
!= end
);
503 * Check if all pages in a range are on a set of nodes.
504 * If pagelist != NULL then isolate pages from the LRU and
505 * put them on the pagelist.
507 static struct vm_area_struct
*
508 check_range(struct mm_struct
*mm
, unsigned long start
, unsigned long end
,
509 const nodemask_t
*nodes
, unsigned long flags
, void *private)
512 struct vm_area_struct
*first
, *vma
, *prev
;
515 first
= find_vma(mm
, start
);
517 return ERR_PTR(-EFAULT
);
519 for (vma
= first
; vma
&& vma
->vm_start
< end
; vma
= vma
->vm_next
) {
520 if (!(flags
& MPOL_MF_DISCONTIG_OK
)) {
521 if (!vma
->vm_next
&& vma
->vm_end
< end
)
522 return ERR_PTR(-EFAULT
);
523 if (prev
&& prev
->vm_end
< vma
->vm_start
)
524 return ERR_PTR(-EFAULT
);
526 if (!is_vm_hugetlb_page(vma
) &&
527 ((flags
& MPOL_MF_STRICT
) ||
528 ((flags
& (MPOL_MF_MOVE
| MPOL_MF_MOVE_ALL
)) &&
529 vma_migratable(vma
)))) {
530 unsigned long endvma
= vma
->vm_end
;
534 if (vma
->vm_start
> start
)
535 start
= vma
->vm_start
;
536 err
= check_pgd_range(vma
, start
, endvma
, nodes
,
539 first
= ERR_PTR(err
);
548 /* Apply policy to a single VMA */
549 static int policy_vma(struct vm_area_struct
*vma
, struct mempolicy
*new)
552 struct mempolicy
*old
= vma
->vm_policy
;
554 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
555 vma
->vm_start
, vma
->vm_end
, vma
->vm_pgoff
,
556 vma
->vm_ops
, vma
->vm_file
,
557 vma
->vm_ops
? vma
->vm_ops
->set_policy
: NULL
);
559 if (vma
->vm_ops
&& vma
->vm_ops
->set_policy
)
560 err
= vma
->vm_ops
->set_policy(vma
, new);
563 vma
->vm_policy
= new;
569 /* Step 2: apply policy to a range and do splits. */
570 static int mbind_range(struct vm_area_struct
*vma
, unsigned long start
,
571 unsigned long end
, struct mempolicy
*new)
573 struct vm_area_struct
*next
;
577 for (; vma
&& vma
->vm_start
< end
; vma
= next
) {
579 if (vma
->vm_start
< start
)
580 err
= split_vma(vma
->vm_mm
, vma
, start
, 1);
581 if (!err
&& vma
->vm_end
> end
)
582 err
= split_vma(vma
->vm_mm
, vma
, end
, 0);
584 err
= policy_vma(vma
, new);
/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}
621 /* Set the process memory policy */
622 static long do_set_mempolicy(unsigned short mode
, unsigned short flags
,
625 struct mempolicy
*new, *old
;
626 struct mm_struct
*mm
= current
->mm
;
627 NODEMASK_SCRATCH(scratch
);
633 new = mpol_new(mode
, flags
, nodes
);
639 * prevent changing our mempolicy while show_numa_maps()
641 * Note: do_set_mempolicy() can be called at init time
645 down_write(&mm
->mmap_sem
);
647 ret
= mpol_set_nodemask(new, nodes
, scratch
);
649 task_unlock(current
);
651 up_write(&mm
->mmap_sem
);
655 old
= current
->mempolicy
;
656 current
->mempolicy
= new;
657 mpol_set_task_struct_flag();
658 if (new && new->mode
== MPOL_INTERLEAVE
&&
659 nodes_weight(new->v
.nodes
))
660 current
->il_next
= first_node(new->v
.nodes
);
661 task_unlock(current
);
663 up_write(&mm
->mmap_sem
);
668 NODEMASK_SCRATCH_FREE(scratch
);
673 * Return nodemask for policy for get_mempolicy() query
675 * Called with task's alloc_lock held
677 static void get_policy_nodemask(struct mempolicy
*p
, nodemask_t
*nodes
)
680 if (p
== &default_policy
)
686 case MPOL_INTERLEAVE
:
690 if (!(p
->flags
& MPOL_F_LOCAL
))
691 node_set(p
->v
.preferred_node
, *nodes
);
692 /* else return empty node mask for local allocation */
699 static int lookup_node(struct mm_struct
*mm
, unsigned long addr
)
704 err
= get_user_pages(current
, mm
, addr
& PAGE_MASK
, 1, 0, 0, &p
, NULL
);
706 err
= page_to_nid(p
);
712 /* Retrieve NUMA policy */
713 static long do_get_mempolicy(int *policy
, nodemask_t
*nmask
,
714 unsigned long addr
, unsigned long flags
)
717 struct mm_struct
*mm
= current
->mm
;
718 struct vm_area_struct
*vma
= NULL
;
719 struct mempolicy
*pol
= current
->mempolicy
;
722 ~(unsigned long)(MPOL_F_NODE
|MPOL_F_ADDR
|MPOL_F_MEMS_ALLOWED
))
725 if (flags
& MPOL_F_MEMS_ALLOWED
) {
726 if (flags
& (MPOL_F_NODE
|MPOL_F_ADDR
))
728 *policy
= 0; /* just so it's initialized */
730 *nmask
= cpuset_current_mems_allowed
;
731 task_unlock(current
);
735 if (flags
& MPOL_F_ADDR
) {
737 * Do NOT fall back to task policy if the
738 * vma/shared policy at addr is NULL. We
739 * want to return MPOL_DEFAULT in this case.
741 down_read(&mm
->mmap_sem
);
742 vma
= find_vma_intersection(mm
, addr
, addr
+1);
744 up_read(&mm
->mmap_sem
);
747 if (vma
->vm_ops
&& vma
->vm_ops
->get_policy
)
748 pol
= vma
->vm_ops
->get_policy(vma
, addr
);
750 pol
= vma
->vm_policy
;
755 pol
= &default_policy
; /* indicates default behavior */
757 if (flags
& MPOL_F_NODE
) {
758 if (flags
& MPOL_F_ADDR
) {
759 err
= lookup_node(mm
, addr
);
763 } else if (pol
== current
->mempolicy
&&
764 pol
->mode
== MPOL_INTERLEAVE
) {
765 *policy
= current
->il_next
;
771 *policy
= pol
== &default_policy
? MPOL_DEFAULT
:
774 * Internal mempolicy flags must be masked off before exposing
775 * the policy to userspace.
777 *policy
|= (pol
->flags
& MPOL_MODE_FLAGS
);
		up_read(&current->mm->mmap_sem);
788 get_policy_nodemask(pol
, nmask
);
789 task_unlock(current
);
		up_read(&current->mm->mmap_sem);
799 #ifdef CONFIG_MIGRATION
803 static void migrate_page_add(struct page
*page
, struct list_head
*pagelist
,
807 * Avoid migrating a page that is shared with others.
809 if ((flags
& MPOL_MF_MOVE_ALL
) || page_mapcount(page
) == 1) {
810 if (!isolate_lru_page(page
)) {
811 list_add_tail(&page
->lru
, pagelist
);
816 static struct page
*new_node_page(struct page
*page
, unsigned long node
, int **x
)
818 return alloc_pages_exact_node(node
, GFP_HIGHUSER_MOVABLE
, 0);
822 * Migrate pages from one node to a target node.
823 * Returns error or the number of pages not migrated.
825 static int migrate_to_node(struct mm_struct
*mm
, int source
, int dest
,
833 node_set(source
, nmask
);
835 check_range(mm
, mm
->mmap
->vm_start
, TASK_SIZE
, &nmask
,
836 flags
| MPOL_MF_DISCONTIG_OK
, &pagelist
);
838 if (!list_empty(&pagelist
))
839 err
= migrate_pages(&pagelist
, new_node_page
, dest
);
845 * Move pages between the two nodesets so as to preserve the physical
846 * layout as much as possible.
848 * Returns the number of page that could not be moved.
850 int do_migrate_pages(struct mm_struct
*mm
,
851 const nodemask_t
*from_nodes
, const nodemask_t
*to_nodes
, int flags
)
857 err
= migrate_prep();
861 down_read(&mm
->mmap_sem
);
863 err
= migrate_vmas(mm
, from_nodes
, to_nodes
, flags
);
	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning from_tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */
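	/*
	 * Worked example (hypothetical node numbers, for illustration only):
	 * with from_nodes = {0,1} and to_nodes = {1,2}, node_remap() maps
	 * 0->1 and 1->2.  The scan prefers the pair <1,2> because dest 2 is
	 * not in the remaining source set, so node 1 is emptied first; the
	 * next pass then migrates node 0's pages into the now-vacated node 1.
	 */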
899 while (!nodes_empty(tmp
)) {
904 for_each_node_mask(s
, tmp
) {
905 d
= node_remap(s
, *from_nodes
, *to_nodes
);
909 source
= s
; /* Node moved. Memorize */
912 /* dest not in remaining from nodes? */
913 if (!node_isset(dest
, tmp
))
919 node_clear(source
, tmp
);
920 err
= migrate_to_node(mm
, source
, dest
, flags
);
927 up_read(&mm
->mmap_sem
);
935 * Allocate a new page for page migration based on vma policy.
936 * Start assuming that page is mapped by vma pointed to by @private.
937 * Search forward from there, if not. N.B., this assumes that the
938 * list of pages handed to migrate_pages()--which is how we get here--
939 * is in virtual address order.
941 static struct page
*new_vma_page(struct page
*page
, unsigned long private, int **x
)
943 struct vm_area_struct
*vma
= (struct vm_area_struct
*)private;
944 unsigned long uninitialized_var(address
);
947 address
= page_address_in_vma(page
, vma
);
948 if (address
!= -EFAULT
)
954 * if !vma, alloc_page_vma() will use task or system default policy
956 return alloc_page_vma(GFP_HIGHUSER_MOVABLE
, vma
, address
);
960 static void migrate_page_add(struct page
*page
, struct list_head
*pagelist
,
965 int do_migrate_pages(struct mm_struct
*mm
,
966 const nodemask_t
*from_nodes
, const nodemask_t
*to_nodes
, int flags
)
971 static struct page
*new_vma_page(struct page
*page
, unsigned long private, int **x
)
977 static long do_mbind(unsigned long start
, unsigned long len
,
978 unsigned short mode
, unsigned short mode_flags
,
979 nodemask_t
*nmask
, unsigned long flags
)
981 struct vm_area_struct
*vma
;
982 struct mm_struct
*mm
= current
->mm
;
983 struct mempolicy
*new;
988 if (flags
& ~(unsigned long)(MPOL_MF_STRICT
|
989 MPOL_MF_MOVE
| MPOL_MF_MOVE_ALL
))
991 if ((flags
& MPOL_MF_MOVE_ALL
) && !capable(CAP_SYS_NICE
))
994 if (start
& ~PAGE_MASK
)
997 if (mode
== MPOL_DEFAULT
)
998 flags
&= ~MPOL_MF_STRICT
;
1000 len
= (len
+ PAGE_SIZE
- 1) & PAGE_MASK
;
1008 new = mpol_new(mode
, mode_flags
, nmask
);
1010 return PTR_ERR(new);
1013 * If we are using the default policy then operation
1014 * on discontinuous address spaces is okay after all
1017 flags
|= MPOL_MF_DISCONTIG_OK
;
1019 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1020 start
, start
+ len
, mode
, mode_flags
,
1021 nmask
? nodes_addr(*nmask
)[0] : -1);
1023 if (flags
& (MPOL_MF_MOVE
| MPOL_MF_MOVE_ALL
)) {
1025 err
= migrate_prep();
1030 NODEMASK_SCRATCH(scratch
);
1032 down_write(&mm
->mmap_sem
);
1034 err
= mpol_set_nodemask(new, nmask
, scratch
);
1035 task_unlock(current
);
1037 up_write(&mm
->mmap_sem
);
1040 NODEMASK_SCRATCH_FREE(scratch
);
1045 vma
= check_range(mm
, start
, end
, nmask
,
1046 flags
| MPOL_MF_INVERT
, &pagelist
);
1052 err
= mbind_range(vma
, start
, end
, new);
1054 if (!list_empty(&pagelist
))
1055 nr_failed
= migrate_pages(&pagelist
, new_vma_page
,
1056 (unsigned long)vma
);
1058 if (!err
&& nr_failed
&& (flags
& MPOL_MF_STRICT
))
1061 putback_lru_pages(&pagelist
);
1063 up_write(&mm
->mmap_sem
);
1070 * User space interface with variable sized bitmaps for nodelists.
1073 /* Copy a node mask from user space. */
1074 static int get_nodes(nodemask_t
*nodes
, const unsigned long __user
*nmask
,
1075 unsigned long maxnode
)
1078 unsigned long nlongs
;
1079 unsigned long endmask
;
1082 nodes_clear(*nodes
);
1083 if (maxnode
== 0 || !nmask
)
1085 if (maxnode
> PAGE_SIZE
*BITS_PER_BYTE
)
1088 nlongs
= BITS_TO_LONGS(maxnode
);
1089 if ((maxnode
% BITS_PER_LONG
) == 0)
1092 endmask
= (1UL << (maxnode
% BITS_PER_LONG
)) - 1;
1094 /* When the user specified more nodes than supported just check
1095 if the non supported part is all zero. */
1096 if (nlongs
> BITS_TO_LONGS(MAX_NUMNODES
)) {
1097 if (nlongs
> PAGE_SIZE
/sizeof(long))
1099 for (k
= BITS_TO_LONGS(MAX_NUMNODES
); k
< nlongs
; k
++) {
1101 if (get_user(t
, nmask
+ k
))
1103 if (k
== nlongs
- 1) {
1109 nlongs
= BITS_TO_LONGS(MAX_NUMNODES
);
1113 if (copy_from_user(nodes_addr(*nodes
), nmask
, nlongs
*sizeof(unsigned long)))
1115 nodes_addr(*nodes
)[nlongs
-1] &= endmask
;
1119 /* Copy a kernel node mask to user space */
1120 static int copy_nodes_to_user(unsigned long __user
*mask
, unsigned long maxnode
,
1123 unsigned long copy
= ALIGN(maxnode
-1, 64) / 8;
1124 const int nbytes
= BITS_TO_LONGS(MAX_NUMNODES
) * sizeof(long);
1126 if (copy
> nbytes
) {
1127 if (copy
> PAGE_SIZE
)
1129 if (clear_user((char __user
*)mask
+ nbytes
, copy
- nbytes
))
1133 return copy_to_user(mask
, nodes_addr(*nodes
), copy
) ? -EFAULT
: 0;
1136 SYSCALL_DEFINE6(mbind
, unsigned long, start
, unsigned long, len
,
1137 unsigned long, mode
, unsigned long __user
*, nmask
,
1138 unsigned long, maxnode
, unsigned, flags
)
1142 unsigned short mode_flags
;
1144 mode_flags
= mode
& MPOL_MODE_FLAGS
;
1145 mode
&= ~MPOL_MODE_FLAGS
;
1146 if (mode
>= MPOL_MAX
)
1148 if ((mode_flags
& MPOL_F_STATIC_NODES
) &&
1149 (mode_flags
& MPOL_F_RELATIVE_NODES
))
1151 err
= get_nodes(&nodes
, nmask
, maxnode
);
1154 return do_mbind(start
, len
, mode
, mode_flags
, &nodes
, flags
);
1157 /* Set the process memory policy */
1158 SYSCALL_DEFINE3(set_mempolicy
, int, mode
, unsigned long __user
*, nmask
,
1159 unsigned long, maxnode
)
1163 unsigned short flags
;
1165 flags
= mode
& MPOL_MODE_FLAGS
;
1166 mode
&= ~MPOL_MODE_FLAGS
;
1167 if ((unsigned int)mode
>= MPOL_MAX
)
1169 if ((flags
& MPOL_F_STATIC_NODES
) && (flags
& MPOL_F_RELATIVE_NODES
))
1171 err
= get_nodes(&nodes
, nmask
, maxnode
);
1174 return do_set_mempolicy(mode
, flags
, &nodes
);
1177 SYSCALL_DEFINE4(migrate_pages
, pid_t
, pid
, unsigned long, maxnode
,
1178 const unsigned long __user
*, old_nodes
,
1179 const unsigned long __user
*, new_nodes
)
1181 const struct cred
*cred
= current_cred(), *tcred
;
1182 struct mm_struct
*mm
;
1183 struct task_struct
*task
;
1186 nodemask_t task_nodes
;
1189 err
= get_nodes(&old
, old_nodes
, maxnode
);
1193 err
= get_nodes(&new, new_nodes
, maxnode
);
1197 /* Find the mm_struct */
1198 read_lock(&tasklist_lock
);
1199 task
= pid
? find_task_by_vpid(pid
) : current
;
1201 read_unlock(&tasklist_lock
);
1204 mm
= get_task_mm(task
);
1205 read_unlock(&tasklist_lock
);
1211 * Check if this process has the right to modify the specified
1212 * process. The right exists if the process has administrative
1213 * capabilities, superuser privileges or the same
1214 * userid as the target process.
1217 tcred
= __task_cred(task
);
1218 if (cred
->euid
!= tcred
->suid
&& cred
->euid
!= tcred
->uid
&&
1219 cred
->uid
!= tcred
->suid
&& cred
->uid
!= tcred
->uid
&&
1220 !capable(CAP_SYS_NICE
)) {
1227 task_nodes
= cpuset_mems_allowed(task
);
1228 /* Is the user allowed to access the target nodes? */
1229 if (!nodes_subset(new, task_nodes
) && !capable(CAP_SYS_NICE
)) {
1234 if (!nodes_subset(new, node_states
[N_HIGH_MEMORY
])) {
1239 err
= security_task_movememory(task
);
1243 err
= do_migrate_pages(mm
, &old
, &new,
1244 capable(CAP_SYS_NICE
) ? MPOL_MF_MOVE_ALL
: MPOL_MF_MOVE
);
1251 /* Retrieve NUMA policy */
1252 SYSCALL_DEFINE5(get_mempolicy
, int __user
*, policy
,
1253 unsigned long __user
*, nmask
, unsigned long, maxnode
,
1254 unsigned long, addr
, unsigned long, flags
)
1257 int uninitialized_var(pval
);
1260 if (nmask
!= NULL
&& maxnode
< MAX_NUMNODES
)
1263 err
= do_get_mempolicy(&pval
, &nodes
, addr
, flags
);
1268 if (policy
&& put_user(pval
, policy
))
1272 err
= copy_nodes_to_user(nmask
, maxnode
, &nodes
);
1277 #ifdef CONFIG_COMPAT
1279 asmlinkage
long compat_sys_get_mempolicy(int __user
*policy
,
1280 compat_ulong_t __user
*nmask
,
1281 compat_ulong_t maxnode
,
1282 compat_ulong_t addr
, compat_ulong_t flags
)
1285 unsigned long __user
*nm
= NULL
;
1286 unsigned long nr_bits
, alloc_size
;
1287 DECLARE_BITMAP(bm
, MAX_NUMNODES
);
1289 nr_bits
= min_t(unsigned long, maxnode
-1, MAX_NUMNODES
);
1290 alloc_size
= ALIGN(nr_bits
, BITS_PER_LONG
) / 8;
1293 nm
= compat_alloc_user_space(alloc_size
);
1295 err
= sys_get_mempolicy(policy
, nm
, nr_bits
+1, addr
, flags
);
1297 if (!err
&& nmask
) {
1298 err
= copy_from_user(bm
, nm
, alloc_size
);
1299 /* ensure entire bitmap is zeroed */
1300 err
|= clear_user(nmask
, ALIGN(maxnode
-1, 8) / 8);
1301 err
|= compat_put_bitmap(nmask
, bm
, nr_bits
);
1307 asmlinkage
long compat_sys_set_mempolicy(int mode
, compat_ulong_t __user
*nmask
,
1308 compat_ulong_t maxnode
)
1311 unsigned long __user
*nm
= NULL
;
1312 unsigned long nr_bits
, alloc_size
;
1313 DECLARE_BITMAP(bm
, MAX_NUMNODES
);
1315 nr_bits
= min_t(unsigned long, maxnode
-1, MAX_NUMNODES
);
1316 alloc_size
= ALIGN(nr_bits
, BITS_PER_LONG
) / 8;
1319 err
= compat_get_bitmap(bm
, nmask
, nr_bits
);
1320 nm
= compat_alloc_user_space(alloc_size
);
1321 err
|= copy_to_user(nm
, bm
, alloc_size
);
1327 return sys_set_mempolicy(mode
, nm
, nr_bits
+1);
1330 asmlinkage
long compat_sys_mbind(compat_ulong_t start
, compat_ulong_t len
,
1331 compat_ulong_t mode
, compat_ulong_t __user
*nmask
,
1332 compat_ulong_t maxnode
, compat_ulong_t flags
)
1335 unsigned long __user
*nm
= NULL
;
1336 unsigned long nr_bits
, alloc_size
;
1339 nr_bits
= min_t(unsigned long, maxnode
-1, MAX_NUMNODES
);
1340 alloc_size
= ALIGN(nr_bits
, BITS_PER_LONG
) / 8;
1343 err
= compat_get_bitmap(nodes_addr(bm
), nmask
, nr_bits
);
1344 nm
= compat_alloc_user_space(alloc_size
);
1345 err
|= copy_to_user(nm
, nodes_addr(bm
), alloc_size
);
1351 return sys_mbind(start
, len
, mode
, nm
, nr_bits
+1, flags
);
/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma   - virtual memory area whose policy is sought
 * @addr  - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * Current or other task's task mempolicy and non-shared vma policies
 * are protected by the task's mmap_sem, which must be held for read by
 * the caller.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.  It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
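/*
 * Typical caller pattern (a sketch, not a function defined in this file):
 * because a shared policy may carry an extra reference, pair the lookup
 * with a conditional put once the allocation is done, e.g.:
 *
 *	pol = get_vma_policy(current, vma, addr);
 *	... allocate using pol ...
 *	mpol_cond_put(pol);
 */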
1372 static struct mempolicy
*get_vma_policy(struct task_struct
*task
,
1373 struct vm_area_struct
*vma
, unsigned long addr
)
1375 struct mempolicy
*pol
= task
->mempolicy
;
1378 if (vma
->vm_ops
&& vma
->vm_ops
->get_policy
) {
1379 struct mempolicy
*vpol
= vma
->vm_ops
->get_policy(vma
,
1383 } else if (vma
->vm_policy
)
1384 pol
= vma
->vm_policy
;
1387 pol
= &default_policy
;
1392 * Return a nodemask representing a mempolicy for filtering nodes for
1395 static nodemask_t
*policy_nodemask(gfp_t gfp
, struct mempolicy
*policy
)
1397 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1398 if (unlikely(policy
->mode
== MPOL_BIND
) &&
1399 gfp_zone(gfp
) >= policy_zone
&&
1400 cpuset_nodemask_valid_mems_allowed(&policy
->v
.nodes
))
1401 return &policy
->v
.nodes
;
1406 /* Return a zonelist indicated by gfp for node representing a mempolicy */
1407 static struct zonelist
*policy_zonelist(gfp_t gfp
, struct mempolicy
*policy
)
1409 int nd
= numa_node_id();
1411 switch (policy
->mode
) {
1412 case MPOL_PREFERRED
:
1413 if (!(policy
->flags
& MPOL_F_LOCAL
))
1414 nd
= policy
->v
.preferred_node
;
		/*
		 * Normally, MPOL_BIND allocations are node-local within the
		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
		 * current node isn't part of the mask, we use the zonelist for
		 * the first node in the mask instead.
		 */
1423 if (unlikely(gfp
& __GFP_THISNODE
) &&
1424 unlikely(!node_isset(nd
, policy
->v
.nodes
)))
1425 nd
= first_node(policy
->v
.nodes
);
1427 case MPOL_INTERLEAVE
: /* should not happen */
1432 return node_zonelist(nd
, gfp
);
1435 /* Do dynamic interleaving for a process */
1436 static unsigned interleave_nodes(struct mempolicy
*policy
)
1439 struct task_struct
*me
= current
;
1442 next
= next_node(nid
, policy
->v
.nodes
);
1443 if (next
>= MAX_NUMNODES
)
1444 next
= first_node(policy
->v
.nodes
);
1445 if (next
< MAX_NUMNODES
)
1451 * Depending on the memory policy provide a node from which to allocate the
1453 * @policy must be protected by freeing by the caller. If @policy is
1454 * the current task's mempolicy, this protection is implicit, as only the
1455 * task can change it's policy. The system default policy requires no
1458 unsigned slab_node(struct mempolicy
*policy
)
1460 if (!policy
|| policy
->flags
& MPOL_F_LOCAL
)
1461 return numa_node_id();
1463 switch (policy
->mode
) {
1464 case MPOL_PREFERRED
:
1466 * handled MPOL_F_LOCAL above
1468 return policy
->v
.preferred_node
;
1470 case MPOL_INTERLEAVE
:
1471 return interleave_nodes(policy
);
1475 * Follow bind policy behavior and start allocation at the
1478 struct zonelist
*zonelist
;
1480 enum zone_type highest_zoneidx
= gfp_zone(GFP_KERNEL
);
1481 zonelist
= &NODE_DATA(numa_node_id())->node_zonelists
[0];
1482 (void)first_zones_zonelist(zonelist
, highest_zoneidx
,
1493 /* Do static interleaving for a VMA with known offset. */
1494 static unsigned offset_il_node(struct mempolicy
*pol
,
1495 struct vm_area_struct
*vma
, unsigned long off
)
1497 unsigned nnodes
= nodes_weight(pol
->v
.nodes
);
1503 return numa_node_id();
1504 target
= (unsigned int)off
% nnodes
;
1507 nid
= next_node(nid
, pol
->v
.nodes
);
1509 } while (c
<= target
);
1513 /* Determine a node number for interleave */
1514 static inline unsigned interleave_nid(struct mempolicy
*pol
,
1515 struct vm_area_struct
*vma
, unsigned long addr
, int shift
)
1521 * for small pages, there is no difference between
1522 * shift and PAGE_SHIFT, so the bit-shift is safe.
1523 * for huge pages, since vm_pgoff is in units of small
1524 * pages, we need to shift off the always 0 bits to get
1527 BUG_ON(shift
< PAGE_SHIFT
);
1528 off
= vma
->vm_pgoff
>> (shift
- PAGE_SHIFT
);
1529 off
+= (addr
- vma
->vm_start
) >> shift
;
1530 return offset_il_node(pol
, vma
, off
);
1532 return interleave_nodes(pol
);
1535 #ifdef CONFIG_HUGETLBFS
1537 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1538 * @vma = virtual memory area whose policy is sought
1539 * @addr = address in @vma for shared policy lookup and interleave policy
1540 * @gfp_flags = for requested zone
1541 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1542 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1544 * Returns a zonelist suitable for a huge page allocation and a pointer
1545 * to the struct mempolicy for conditional unref after allocation.
1546 * If the effective policy is 'BIND, returns a pointer to the mempolicy's
1547 * @nodemask for filtering the zonelist.
1549 struct zonelist
*huge_zonelist(struct vm_area_struct
*vma
, unsigned long addr
,
1550 gfp_t gfp_flags
, struct mempolicy
**mpol
,
1551 nodemask_t
**nodemask
)
1553 struct zonelist
*zl
;
1555 *mpol
= get_vma_policy(current
, vma
, addr
);
1556 *nodemask
= NULL
; /* assume !MPOL_BIND */
1558 if (unlikely((*mpol
)->mode
== MPOL_INTERLEAVE
)) {
1559 zl
= node_zonelist(interleave_nid(*mpol
, vma
, addr
,
1560 huge_page_shift(hstate_vma(vma
))), gfp_flags
);
1562 zl
= policy_zonelist(gfp_flags
, *mpol
);
1563 if ((*mpol
)->mode
== MPOL_BIND
)
1564 *nodemask
= &(*mpol
)->v
.nodes
;
1570 /* Allocate a page in interleaved policy.
1571 Own path because it needs to do special accounting. */
1572 static struct page
*alloc_page_interleave(gfp_t gfp
, unsigned order
,
1575 struct zonelist
*zl
;
1578 zl
= node_zonelist(nid
, gfp
);
1579 page
= __alloc_pages(gfp
, order
, zl
);
1580 if (page
&& page_zone(page
) == zonelist_zone(&zl
->_zonerefs
[0]))
1581 inc_zone_page_state(page
, NUMA_INTERLEAVE_HIT
);
/**
 * alloc_page_vma - Allocate a page for a VMA.
 *
 * @gfp:
 *	%GFP_USER    user allocation.
 *	%GFP_KERNEL  kernel allocations,
 *	%GFP_HIGHMEM highmem/user allocations,
 *	%GFP_FS      allocation should not call back into a file system.
 *	%GFP_ATOMIC  don't sleep.
 *
 * @vma:  Pointer to VMA or NULL if not available.
 * @addr: Virtual Address of the allocation. Must be inside the VMA.
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
 * mm_struct of the VMA to prevent it from going away. Should be used for
 * all allocations for pages that will be mapped into user space.
 * Returns NULL when no page can be allocated.
 *
 * Should be called with the mmap_sem of the vma held.
 */
1608 alloc_page_vma(gfp_t gfp
, struct vm_area_struct
*vma
, unsigned long addr
)
1610 struct mempolicy
*pol
= get_vma_policy(current
, vma
, addr
);
1611 struct zonelist
*zl
;
1613 if (unlikely(pol
->mode
== MPOL_INTERLEAVE
)) {
1616 nid
= interleave_nid(pol
, vma
, addr
, PAGE_SHIFT
);
1618 return alloc_page_interleave(gfp
, 0, nid
);
1620 zl
= policy_zonelist(gfp
, pol
);
1621 if (unlikely(mpol_needs_cond_ref(pol
))) {
1623 * slow path: ref counted shared policy
1625 struct page
*page
= __alloc_pages_nodemask(gfp
, 0,
1626 zl
, policy_nodemask(gfp
, pol
));
1631 * fast path: default or task policy
1633 return __alloc_pages_nodemask(gfp
, 0, zl
, policy_nodemask(gfp
, pol
));
/**
 * alloc_pages_current - Allocate pages.
 *
 * @gfp:
 *	%GFP_USER    user allocation,
 *	%GFP_KERNEL  kernel allocation,
 *	%GFP_HIGHMEM highmem allocation,
 *	%GFP_FS      don't call back into a file system.
 *	%GFP_ATOMIC  don't sleep.
 * @order: Power of two of allocation size in pages. 0 is a single page.
 *
 * Allocate a page from the kernel page pool.  When not in
 * interrupt context, apply the current process NUMA policy.
 * Returns NULL when no page can be allocated.
 *
 * Don't call cpuset_update_task_memory_state() unless
 * 1) it's ok to take cpuset_sem (can WAIT), and
 * 2) allocating for current task (not interrupt).
 */
1655 struct page
*alloc_pages_current(gfp_t gfp
, unsigned order
)
1657 struct mempolicy
*pol
= current
->mempolicy
;
1659 if (!pol
|| in_interrupt() || (gfp
& __GFP_THISNODE
))
1660 pol
= &default_policy
;
1663 * No reference counting needed for current->mempolicy
1664 * nor system default_policy
1666 if (pol
->mode
== MPOL_INTERLEAVE
)
1667 return alloc_page_interleave(gfp
, order
, interleave_nodes(pol
));
1668 return __alloc_pages_nodemask(gfp
, order
,
1669 policy_zonelist(gfp
, pol
), policy_nodemask(gfp
, pol
));
1671 EXPORT_SYMBOL(alloc_pages_current
);
/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset relative after its cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 */

/* Slow path of a mempolicy duplicate */
1682 struct mempolicy
*__mpol_dup(struct mempolicy
*old
)
1684 struct mempolicy
*new = kmem_cache_alloc(policy_cache
, GFP_KERNEL
);
1687 return ERR_PTR(-ENOMEM
);
1688 if (current_cpuset_is_being_rebound()) {
1689 nodemask_t mems
= cpuset_mems_allowed(current
);
1690 mpol_rebind_policy(old
, &mems
);
1693 atomic_set(&new->refcnt
, 1);
/*
 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
 * eliminate the MPOL_F_* flags that require conditional ref and
 * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
 * after return.  Use the returned value.
 *
 * Allows use of a mempolicy for, e.g., multiple allocations with a single
 * policy lookup, even if the policy needs/has extra ref on lookup.
 * shmem_readahead needs this.
 */
1707 struct mempolicy
*__mpol_cond_copy(struct mempolicy
*tompol
,
1708 struct mempolicy
*frompol
)
1710 if (!mpol_needs_cond_ref(frompol
))
1714 tompol
->flags
&= ~MPOL_F_SHARED
; /* copy doesn't need unref */
1715 __mpol_put(frompol
);
1719 static int mpol_match_intent(const struct mempolicy
*a
,
1720 const struct mempolicy
*b
)
1722 if (a
->flags
!= b
->flags
)
1724 if (!mpol_store_user_nodemask(a
))
1726 return nodes_equal(a
->w
.user_nodemask
, b
->w
.user_nodemask
);
1729 /* Slow path of a mempolicy comparison */
1730 int __mpol_equal(struct mempolicy
*a
, struct mempolicy
*b
)
1734 if (a
->mode
!= b
->mode
)
1736 if (a
->mode
!= MPOL_DEFAULT
&& !mpol_match_intent(a
, b
))
1741 case MPOL_INTERLEAVE
:
1742 return nodes_equal(a
->v
.nodes
, b
->v
.nodes
);
1743 case MPOL_PREFERRED
:
1744 return a
->v
.preferred_node
== b
->v
.preferred_node
&&
1745 a
->flags
== b
->flags
;
/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in Red-Black tree linked from the inode.
 * They are protected by the sp->lock spinlock, which should be held
 * for any accesses to the tree.
 */

/* lookup first element intersecting start-end */
/* Caller holds sp->lock */
1763 static struct sp_node
*
1764 sp_lookup(struct shared_policy
*sp
, unsigned long start
, unsigned long end
)
1766 struct rb_node
*n
= sp
->root
.rb_node
;
1769 struct sp_node
*p
= rb_entry(n
, struct sp_node
, nd
);
1771 if (start
>= p
->end
)
1773 else if (end
<= p
->start
)
1781 struct sp_node
*w
= NULL
;
1782 struct rb_node
*prev
= rb_prev(n
);
1785 w
= rb_entry(prev
, struct sp_node
, nd
);
1786 if (w
->end
<= start
)
1790 return rb_entry(n
, struct sp_node
, nd
);
1793 /* Insert a new shared policy into the list. */
1794 /* Caller holds sp->lock */
1795 static void sp_insert(struct shared_policy
*sp
, struct sp_node
*new)
1797 struct rb_node
**p
= &sp
->root
.rb_node
;
1798 struct rb_node
*parent
= NULL
;
1803 nd
= rb_entry(parent
, struct sp_node
, nd
);
1804 if (new->start
< nd
->start
)
1806 else if (new->end
> nd
->end
)
1807 p
= &(*p
)->rb_right
;
1811 rb_link_node(&new->nd
, parent
, p
);
1812 rb_insert_color(&new->nd
, &sp
->root
);
1813 pr_debug("inserting %lx-%lx: %d\n", new->start
, new->end
,
1814 new->policy
? new->policy
->mode
: 0);
1817 /* Find shared policy intersecting idx */
1819 mpol_shared_policy_lookup(struct shared_policy
*sp
, unsigned long idx
)
1821 struct mempolicy
*pol
= NULL
;
1824 if (!sp
->root
.rb_node
)
1826 spin_lock(&sp
->lock
);
1827 sn
= sp_lookup(sp
, idx
, idx
+1);
1829 mpol_get(sn
->policy
);
1832 spin_unlock(&sp
->lock
);
1836 static void sp_delete(struct shared_policy
*sp
, struct sp_node
*n
)
1838 pr_debug("deleting %lx-l%lx\n", n
->start
, n
->end
);
1839 rb_erase(&n
->nd
, &sp
->root
);
1840 mpol_put(n
->policy
);
1841 kmem_cache_free(sn_cache
, n
);
1844 static struct sp_node
*sp_alloc(unsigned long start
, unsigned long end
,
1845 struct mempolicy
*pol
)
1847 struct sp_node
*n
= kmem_cache_alloc(sn_cache
, GFP_KERNEL
);
1854 pol
->flags
|= MPOL_F_SHARED
; /* for unref */
1859 /* Replace a policy range. */
1860 static int shared_policy_replace(struct shared_policy
*sp
, unsigned long start
,
1861 unsigned long end
, struct sp_node
*new)
1863 struct sp_node
*n
, *new2
= NULL
;
1866 spin_lock(&sp
->lock
);
1867 n
= sp_lookup(sp
, start
, end
);
1868 /* Take care of old policies in the same range. */
1869 while (n
&& n
->start
< end
) {
1870 struct rb_node
*next
= rb_next(&n
->nd
);
1871 if (n
->start
>= start
) {
1877 /* Old policy spanning whole new range. */
1880 spin_unlock(&sp
->lock
);
1881 new2
= sp_alloc(end
, n
->end
, n
->policy
);
1887 sp_insert(sp
, new2
);
1895 n
= rb_entry(next
, struct sp_node
, nd
);
1899 spin_unlock(&sp
->lock
);
1901 mpol_put(new2
->policy
);
1902 kmem_cache_free(sn_cache
, new2
);
/**
 * mpol_shared_policy_init - initialize shared policy for inode
 * @sp: pointer to inode shared policy
 * @mpol:  struct mempolicy to install
 *
 * Install non-NULL @mpol in inode's shared policy rb-tree.
 * On entry, the current task has a reference on a non-NULL @mpol.
 * This must be released on exit.
 * This is called at get_inode() calls and we can use GFP_KERNEL.
 */
1917 void mpol_shared_policy_init(struct shared_policy
*sp
, struct mempolicy
*mpol
)
1921 sp
->root
= RB_ROOT
; /* empty tree == default mempolicy */
1922 spin_lock_init(&sp
->lock
);
1925 struct vm_area_struct pvma
;
1926 struct mempolicy
*new;
1927 NODEMASK_SCRATCH(scratch
);
1931 /* contextualize the tmpfs mount point mempolicy */
1932 new = mpol_new(mpol
->mode
, mpol
->flags
, &mpol
->w
.user_nodemask
);
1934 mpol_put(mpol
); /* drop our ref on sb mpol */
1935 NODEMASK_SCRATCH_FREE(scratch
);
1936 return; /* no valid nodemask intersection */
1940 ret
= mpol_set_nodemask(new, &mpol
->w
.user_nodemask
, scratch
);
1941 task_unlock(current
);
1942 mpol_put(mpol
); /* drop our ref on sb mpol */
1944 NODEMASK_SCRATCH_FREE(scratch
);
1949 /* Create pseudo-vma that contains just the policy */
1950 memset(&pvma
, 0, sizeof(struct vm_area_struct
));
1951 pvma
.vm_end
= TASK_SIZE
; /* policy covers entire file */
1952 mpol_set_shared_policy(sp
, &pvma
, new); /* adds ref */
1953 mpol_put(new); /* drop initial ref */
1954 NODEMASK_SCRATCH_FREE(scratch
);
1958 int mpol_set_shared_policy(struct shared_policy
*info
,
1959 struct vm_area_struct
*vma
, struct mempolicy
*npol
)
1962 struct sp_node
*new = NULL
;
1963 unsigned long sz
= vma_pages(vma
);
1965 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1967 sz
, npol
? npol
->mode
: -1,
1968 npol
? npol
->flags
: -1,
1969 npol
? nodes_addr(npol
->v
.nodes
)[0] : -1);
1972 new = sp_alloc(vma
->vm_pgoff
, vma
->vm_pgoff
+ sz
, npol
);
1976 err
= shared_policy_replace(info
, vma
->vm_pgoff
, vma
->vm_pgoff
+sz
, new);
1978 kmem_cache_free(sn_cache
, new);
1982 /* Free a backing policy store on inode delete. */
1983 void mpol_free_shared_policy(struct shared_policy
*p
)
1986 struct rb_node
*next
;
1988 if (!p
->root
.rb_node
)
1990 spin_lock(&p
->lock
);
1991 next
= rb_first(&p
->root
);
1993 n
= rb_entry(next
, struct sp_node
, nd
);
1994 next
= rb_next(&n
->nd
);
1995 rb_erase(&n
->nd
, &p
->root
);
1996 mpol_put(n
->policy
);
1997 kmem_cache_free(sn_cache
, n
);
1999 spin_unlock(&p
->lock
);
2002 /* assumes fs == KERNEL_DS */
2003 void __init
numa_policy_init(void)
2005 nodemask_t interleave_nodes
;
2006 unsigned long largest
= 0;
2007 int nid
, prefer
= 0;
2009 policy_cache
= kmem_cache_create("numa_policy",
2010 sizeof(struct mempolicy
),
2011 0, SLAB_PANIC
, NULL
);
2013 sn_cache
= kmem_cache_create("shared_policy_node",
2014 sizeof(struct sp_node
),
2015 0, SLAB_PANIC
, NULL
);
2018 * Set interleaving policy for system init. Interleaving is only
2019 * enabled across suitably sized nodes (default is >= 16MB), or
2020 * fall back to the largest node if they're all smaller.
2022 nodes_clear(interleave_nodes
);
2023 for_each_node_state(nid
, N_HIGH_MEMORY
) {
2024 unsigned long total_pages
= node_present_pages(nid
);
2026 /* Preserve the largest node */
2027 if (largest
< total_pages
) {
2028 largest
= total_pages
;
2032 /* Interleave this node? */
2033 if ((total_pages
<< PAGE_SHIFT
) >= (16 << 20))
2034 node_set(nid
, interleave_nodes
);
2037 /* All too small, use the largest */
2038 if (unlikely(nodes_empty(interleave_nodes
)))
2039 node_set(prefer
, interleave_nodes
);
2041 if (do_set_mempolicy(MPOL_INTERLEAVE
, 0, &interleave_nodes
))
2042 printk("numa_policy_init: interleaving failed\n");
2045 /* Reset policy of current process to default */
2046 void numa_default_policy(void)
2048 do_set_mempolicy(MPOL_DEFAULT
, 0, NULL
);
/*
 * Parse and format mempolicy from/to strings
 */

/*
 * "local" is pseudo-policy:  MPOL_PREFERRED with MPOL_F_LOCAL flag
 * Used only for mpol_parse_str() and mpol_to_str()
 */
#define MPOL_LOCAL (MPOL_INTERLEAVE + 1)
static const char * const policy_types[] =
	{ "default", "prefer", "bind", "interleave", "local" };
#ifdef CONFIG_TMPFS
/**
 * mpol_parse_str - parse string to mempolicy
 * @str:  string containing mempolicy to parse
 * @mpol:  pointer to struct mempolicy pointer, returned on success.
 * @no_context:  flag whether to "contextualize" the mempolicy
 *
 * Format of input:
 *	<mode>[=<flags>][:<nodelist>]
 *
 * if @no_context is true, save the input nodemask in w.user_nodemask in
 * the returned mempolicy.  This will be used to "clone" the mempolicy in
 * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
 * mount option.  Note that if 'static' or 'relative' mode flags were
 * specified, the input nodemask will already have been saved.  Saving
 * it again is redundant, but safe.
 *
 * On success, returns 0, else 1
 */
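/*
 * Illustrative inputs accepted by the format above (node numbers chosen
 * for the example only):
 *
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"prefer:1"		MPOL_PREFERRED with preferred node 1
 *	"bind=static:0,2"	MPOL_BIND with MPOL_F_STATIC_NODES
 *	"local"			MPOL_PREFERRED with MPOL_F_LOCAL (no nodelist)
 *	"default"		MPOL_DEFAULT (no nodelist allowed)
 */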
2083 int mpol_parse_str(char *str
, struct mempolicy
**mpol
, int no_context
)
2085 struct mempolicy
*new = NULL
;
2086 unsigned short uninitialized_var(mode
);
2087 unsigned short uninitialized_var(mode_flags
);
2089 char *nodelist
= strchr(str
, ':');
2090 char *flags
= strchr(str
, '=');
2095 /* NUL-terminate mode or flags string */
2097 if (nodelist_parse(nodelist
, nodes
))
2099 if (!nodes_subset(nodes
, node_states
[N_HIGH_MEMORY
]))
2105 *flags
++ = '\0'; /* terminate mode string */
2107 for (i
= 0; i
<= MPOL_LOCAL
; i
++) {
2108 if (!strcmp(str
, policy_types
[i
])) {
2117 case MPOL_PREFERRED
:
2119 * Insist on a nodelist of one node only
2122 char *rest
= nodelist
;
2123 while (isdigit(*rest
))
2129 case MPOL_INTERLEAVE
:
2131 * Default to online nodes with memory if no nodelist
2134 nodes
= node_states
[N_HIGH_MEMORY
];
2138 * Don't allow a nodelist; mpol_new() checks flags
2142 mode
= MPOL_PREFERRED
;
2146 * Insist on a empty nodelist
2153 * Insist on a nodelist
2162 * Currently, we only support two mutually exclusive
2165 if (!strcmp(flags
, "static"))
2166 mode_flags
|= MPOL_F_STATIC_NODES
;
2167 else if (!strcmp(flags
, "relative"))
2168 mode_flags
|= MPOL_F_RELATIVE_NODES
;
2173 new = mpol_new(mode
, mode_flags
, &nodes
);
2179 NODEMASK_SCRATCH(scratch
);
2182 ret
= mpol_set_nodemask(new, &nodes
, scratch
);
2183 task_unlock(current
);
2186 NODEMASK_SCRATCH_FREE(scratch
);
2194 /* save for contextualization */
2195 new->w
.user_nodemask
= nodes
;
2199 /* Restore string for error message */
#endif /* CONFIG_TMPFS */

/**
 * mpol_to_str - format a mempolicy structure for printing
 * @buffer:  to contain formatted mempolicy string
 * @maxlen:  length of @buffer
 * @pol:  pointer to mempolicy to be formatted
 * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
 *
 * Convert a mempolicy into a string.
 * Returns the number of characters in buffer (if positive)
 * or an error (negative)
 */
2221 int mpol_to_str(char *buffer
, int maxlen
, struct mempolicy
*pol
, int no_context
)
2226 unsigned short mode
;
2227 unsigned short flags
= pol
? pol
->flags
: 0;
2230 * Sanity check: room for longest mode, flag and some nodes
2232 VM_BUG_ON(maxlen
< strlen("interleave") + strlen("relative") + 16);
2234 if (!pol
|| pol
== &default_policy
)
2235 mode
= MPOL_DEFAULT
;
2244 case MPOL_PREFERRED
:
2246 if (flags
& MPOL_F_LOCAL
)
2247 mode
= MPOL_LOCAL
; /* pseudo-policy */
2249 node_set(pol
->v
.preferred_node
, nodes
);
2254 case MPOL_INTERLEAVE
:
2256 nodes
= pol
->w
.user_nodemask
;
2258 nodes
= pol
->v
.nodes
;
2265 l
= strlen(policy_types
[mode
]);
2266 if (buffer
+ maxlen
< p
+ l
+ 1)
2269 strcpy(p
, policy_types
[mode
]);
2272 if (flags
& MPOL_MODE_FLAGS
) {
2273 if (buffer
+ maxlen
< p
+ 2)
2278 * Currently, the only defined flags are mutually exclusive
2280 if (flags
& MPOL_F_STATIC_NODES
)
2281 p
+= snprintf(p
, buffer
+ maxlen
- p
, "static");
2282 else if (flags
& MPOL_F_RELATIVE_NODES
)
2283 p
+= snprintf(p
, buffer
+ maxlen
- p
, "relative");
2286 if (!nodes_empty(nodes
)) {
2287 if (buffer
+ maxlen
< p
+ 2)
2290 p
+= nodelist_scnprintf(p
, buffer
+ maxlen
- p
, nodes
);
struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};
2306 static void gather_stats(struct page
*page
, void *private, int pte_dirty
)
2308 struct numa_maps
*md
= private;
2309 int count
= page_mapcount(page
);
2312 if (pte_dirty
|| PageDirty(page
))
2315 if (PageSwapCache(page
))
2318 if (PageActive(page
) || PageUnevictable(page
))
2321 if (PageWriteback(page
))
2327 if (count
> md
->mapcount_max
)
2328 md
->mapcount_max
= count
;
2330 md
->node
[page_to_nid(page
)]++;
2333 #ifdef CONFIG_HUGETLB_PAGE
2334 static void check_huge_range(struct vm_area_struct
*vma
,
2335 unsigned long start
, unsigned long end
,
2336 struct numa_maps
*md
)
2340 struct hstate
*h
= hstate_vma(vma
);
2341 unsigned long sz
= huge_page_size(h
);
2343 for (addr
= start
; addr
< end
; addr
+= sz
) {
2344 pte_t
*ptep
= huge_pte_offset(vma
->vm_mm
,
2345 addr
& huge_page_mask(h
));
2355 page
= pte_page(pte
);
2359 gather_stats(page
, md
, pte_dirty(*ptep
));
2363 static inline void check_huge_range(struct vm_area_struct
*vma
,
2364 unsigned long start
, unsigned long end
,
2365 struct numa_maps
*md
)
2371 * Display pages allocated per node and memory policy via /proc.
2373 int show_numa_map(struct seq_file
*m
, void *v
)
2375 struct proc_maps_private
*priv
= m
->private;
2376 struct vm_area_struct
*vma
= v
;
2377 struct numa_maps
*md
;
2378 struct file
*file
= vma
->vm_file
;
2379 struct mm_struct
*mm
= vma
->vm_mm
;
2380 struct mempolicy
*pol
;
2387 md
= kzalloc(sizeof(struct numa_maps
), GFP_KERNEL
);
2391 pol
= get_vma_policy(priv
->task
, vma
, vma
->vm_start
);
2392 mpol_to_str(buffer
, sizeof(buffer
), pol
, 0);
2395 seq_printf(m
, "%08lx %s", vma
->vm_start
, buffer
);
2398 seq_printf(m
, " file=");
2399 seq_path(m
, &file
->f_path
, "\n\t= ");
2400 } else if (vma
->vm_start
<= mm
->brk
&& vma
->vm_end
>= mm
->start_brk
) {
2401 seq_printf(m
, " heap");
2402 } else if (vma
->vm_start
<= mm
->start_stack
&&
2403 vma
->vm_end
>= mm
->start_stack
) {
2404 seq_printf(m
, " stack");
2407 if (is_vm_hugetlb_page(vma
)) {
2408 check_huge_range(vma
, vma
->vm_start
, vma
->vm_end
, md
);
2409 seq_printf(m
, " huge");
2411 check_pgd_range(vma
, vma
->vm_start
, vma
->vm_end
,
2412 &node_states
[N_HIGH_MEMORY
], MPOL_MF_STATS
, md
);
2419 seq_printf(m
," anon=%lu",md
->anon
);
2422 seq_printf(m
," dirty=%lu",md
->dirty
);
2424 if (md
->pages
!= md
->anon
&& md
->pages
!= md
->dirty
)
2425 seq_printf(m
, " mapped=%lu", md
->pages
);
2427 if (md
->mapcount_max
> 1)
2428 seq_printf(m
, " mapmax=%lu", md
->mapcount_max
);
2431 seq_printf(m
," swapcache=%lu", md
->swapcache
);
2433 if (md
->active
< md
->pages
&& !is_vm_hugetlb_page(vma
))
2434 seq_printf(m
," active=%lu", md
->active
);
2437 seq_printf(m
," writeback=%lu", md
->writeback
);
2439 for_each_node_state(n
, N_HIGH_MEMORY
)
2441 seq_printf(m
, " N%d=%lu", n
, md
->node
[n
]);
2446 if (m
->count
< m
->size
)
2447 m
->version
= (vma
!= priv
->tail_vma
) ? vma
->vm_start
: 0;