/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case node -1 here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/
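/*
 * Illustrative user-space sketch (not part of this file): the policies
 * described above are normally set from user space with mbind() and
 * set_mempolicy(), e.g. via libnuma's <numaif.h>.  Assuming nodes 0 and
 * 1 exist, something like
 *
 *	#include <numaif.h>
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(buf, len, MPOL_INTERLEAVE, &mask, 8 * sizeof(mask), 0);
 *
 * would interleave the pages of 'buf' across nodes 0 and 1, while
 * set_mempolicy(MPOL_PREFERRED, &mask, ...) would set the per-process
 * policy that the allocation paths below consult on page faults.
 */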
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};
/* Do sanity checking on a policy */
static int mpol_check_policy(int mode, nodemask_t *nodes)
{
	int empty = nodes_empty(*nodes);

	switch (mode) {
	case MPOL_DEFAULT:
		if (!empty)
			return -EINVAL;
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		/* Preferred will only use the first bit, but allow
		   more for now. */
		if (empty)
			return -EINVAL;
		break;
	}
	return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
}

/* Generate a custom zonelist for the BIND policy. */
static struct zonelist *bind_zonelist(nodemask_t *nodes)
{
	struct zonelist *zl;
	int num, max, nd;
	enum zone_type k;

	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
	max++;			/* space for zlcache_ptr (see mmzone.h) */
	zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
	if (!zl)
		return ERR_PTR(-ENOMEM);
	zl->zlcache_ptr = NULL;
	num = 0;
	/* First put in the highest zones from all nodes, then all the next
	   lower zones etc. Avoid empty zones because the memory allocator
	   doesn't like them. If you implement node hot removal you
	   have to fix that. */
	k = MAX_NR_ZONES - 1;
	while (1) {
		for_each_node_mask(nd, *nodes) {
			struct zone *z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				zl->zones[num++] = z;
		}
		if (k == 0)
			break;
		k--;
	}
	if (num == 0) {
		kfree(zl);
		return ERR_PTR(-EINVAL);
	}
	zl->zones[num] = NULL;
	return zl;
}

/* Create a new policy */
static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d nodes[0] %lx\n",
		 mode, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT)
		return NULL;
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	switch (mode) {
	case MPOL_INTERLEAVE:
		policy->v.nodes = *nodes;
		if (nodes_weight(*nodes) == 0) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-EINVAL);
		}
		break;
	case MPOL_PREFERRED:
		policy->v.preferred_node = first_node(*nodes);
		if (policy->v.preferred_node >= MAX_NUMNODES)
			policy->v.preferred_node = -1;
		break;
	case MPOL_BIND:
		policy->v.zonelist = bind_zonelist(nodes);
		if (IS_ERR(policy->v.zonelist)) {
			void *error_code = policy->v.zonelist;
			kmem_cache_free(policy_cache, policy);
			return error_code;
		}
		break;
	}
	policy->policy = mode;
	policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
	return policy;
}
static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If the PageReserved would not be checked here then f.e.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}
/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			return ERR_PTR(err);
	}

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}
/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_free(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}
static int contextualize_policy(int mode, nodemask_t *nodes)
{
	if (!nodes)
		return 0;

	cpuset_update_task_memory_state();
	if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
		return -EINVAL;
	return mpol_check_policy(mode, nodes);
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
long do_set_mempolicy(int mode, nodemask_t *nodes)
{
	struct mempolicy *new;

	if (contextualize_policy(mode, nodes))
		return -EINVAL;
	new = mpol_new(mode, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);
	mpol_free(current->mempolicy);
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->policy == MPOL_INTERLEAVE)
		current->il_next = first_node(new->v.nodes);
	return 0;
}
/* Fill a zone bitmap for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
	int i;

	nodes_clear(*nodes);
	switch (p->policy) {
	case MPOL_BIND:
		for (i = 0; p->v.zonelist->zones[i]; i++)
			node_set(zone_to_nid(p->v.zonelist->zones[i]),
				*nodes);
		break;
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		/* or use current node instead of online map? */
		if (p->v.preferred_node < 0)
			*nodes = node_online_map;
		else
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
long do_get_mempolicy(int *policy, nodemask_t *nmask,
			unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_task_memory_state();
	if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
		return -EINVAL;
	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->policy == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else
		*policy = pol->policy;

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_zonemask(pol, nmask);

 out:
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
#ifdef CONFIG_MIGRATION

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
		isolate_lru_page(page, pagelist);
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fallback to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory from that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}
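/*
 * Illustrative example (not from the original source): with
 * from_nodes = {0,1} and to_nodes = {2,3}, node_remap() maps the nth
 * set bit of 'from' to the nth set bit of 'to', so the loop above
 * migrates 0 -> 2 and then 1 -> 3.  With from_nodes = {0,1} and
 * to_nodes = {1,2}, the first pass prefers 1 -> 2 (dest 2 is not a
 * remaining source) and only afterwards moves 0 -> 1, into the slot
 * that node 1 has just vacated.
 */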
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;

	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
					page_address_in_vma(page, vma));
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

long do_mbind(unsigned long start, unsigned long len,
		unsigned long mode, nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
				      MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
	    || mode > MPOL_MAX)
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	if (mpol_check_policy(mode, nmask))
		return -EINVAL;

	new = mpol_new(mode, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%ld nodes:%lx\n", start, start + len,
		 mode, nmask ? nodes_addr(*nmask)[0] : -1);

	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_free(new);
	return err;
}
/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the non supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			unsigned long mode,
			unsigned long __user *nmask, unsigned long maxnode,
			unsigned flags)
{
	nodemask_t nodes;
	int err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
#ifdef CONFIG_CPUSETS
	/* Restrict the nodes to the allowed nodes in the cpuset */
	nodes_and(nodes, nodes, current->mems_allowed);
#endif
	return do_mbind(start, len, mode, &nodes, flags);
}
/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
		unsigned long maxnode)
{
	int err;
	nodemask_t nodes;

	if (mode < 0 || mode > MPOL_MAX)
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, &nodes);
}

asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
		const unsigned long __user *old_nodes,
		const unsigned long __user *new_nodes)
{
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_pid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	if (!nodes_subset(new, node_online_map)) {
		err = -EINVAL;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}
/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr, unsigned long flags)
{
	int err;
	int pval;
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif
/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma  - virtual memory area whose policy is sought
 * @addr - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * Returned policy has extra reference count if shared, vma,
 * or some other task's policy [show_numa_maps() can pass
 * @task != current].  It is the caller's responsibility to
 * free the reference in these cases.
 */
static struct mempolicy * get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = task->mempolicy;
	int shared_pol = 0;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			pol = vma->vm_ops->get_policy(vma, addr);
			shared_pol = 1;	/* if pol non-NULL, add ref below */
		} else if (vma->vm_policy &&
				vma->vm_policy->policy != MPOL_DEFAULT)
			pol = vma->vm_policy;
	}
	if (!pol)
		pol = &default_policy;
	else if (!shared_pol && pol != current->mempolicy)
		mpol_get(pol);	/* vma or other task's policy */
	return pol;
}

/* Return a zonelist representing a mempolicy */
static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
{
	int nd;

	switch (policy->policy) {
	case MPOL_PREFERRED:
		nd = policy->v.preferred_node;
		if (nd < 0)
			nd = numa_node_id();
		break;
	case MPOL_BIND:
		/* Lower zones don't get a policy applied */
		/* Careful: current->mems_allowed might have moved */
		if (gfp_zone(gfp) >= policy_zone)
			if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
				return policy->v.zonelist;
		/*FALL THROUGH*/
	case MPOL_INTERLEAVE: /* should not happen */
	case MPOL_DEFAULT:
		nd = numa_node_id();
		break;
	default:
		nd = 0;
		BUG();
	}
	return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
}
/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	me->il_next = next;
	return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned slab_node(struct mempolicy *policy)
{
	int pol = policy ? policy->policy : MPOL_DEFAULT;

	switch (pol) {
	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND:
		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		return zone_to_nid(policy->v.zonelist->zones[0]);

	case MPOL_PREFERRED:
		if (policy->v.preferred_node >= 0)
			return policy->v.preferred_node;
		/* Fall through */

	default:
		return numa_node_id();
	}
}

/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long off)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target = (unsigned)off % nnodes;
	int c;
	int nid = -1;

	c = 0;
	do {
		nid = next_node(nid, pol->v.nodes);
		c++;
	} while (c <= target);
	return nid;
}

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	} else
		return interleave_nodes(pol);
}
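/*
 * Illustrative worked example (not from the original source): for a
 * 2MB huge page mapping with HPAGE_SHIFT = 21, PAGE_SHIFT = 12,
 * vm_pgoff = 1024 (a 4MB file offset) and a fault 6MB into the VMA,
 * the computation above gives off = (1024 >> 9) + (0x600000 >> 21)
 * = 2 + 3 = 5, so with a 4-node interleave mask offset_il_node()
 * picks the set bit at index 5 % 4 = 1 (counting from zero) in
 * pol->v.nodes.
 */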
#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma = virtual memory area whose policy is sought
 * @addr = address in @vma for shared policy lookup and interleave policy
 * @gfp_flags = for requested zone
 * @mpol = pointer to mempolicy pointer for reference counted 'BIND policy
 *
 * Returns a zonelist suitable for a huge page allocation.
 * If the effective policy is 'BIND, returns pointer to policy's zonelist.
 * If it is also a policy for which get_vma_policy() returns an extra
 * reference, we must hold that reference until after allocation.
 * In that case, return policy via @mpol so hugetlb allocation can drop
 * the reference.  For non-'BIND referenced policies, we can/do drop the
 * reference here, so the caller doesn't need to know about the special case
 * for default and current task policy.
 */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
				gfp_t gfp_flags, struct mempolicy **mpol)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);
	struct zonelist *zl;

	*mpol = NULL;		/* probably no unref needed */
	if (pol->policy == MPOL_INTERLEAVE) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
		__mpol_free(pol);		/* finished with pol */
		return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
	}

	zl = zonelist_policy(GFP_HIGHUSER, pol);
	if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
		if (pol->policy != MPOL_BIND)
			__mpol_free(pol);	/* finished with pol */
		else
			*mpol = pol;	/* unref needed after allocation */
	}
	return zl;
}
#endif
/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct zonelist *zl;
	struct page *page;

	zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
	page = __alloc_pages(gfp, order, zl);
	if (page && page_zone(page) == zl->zones[0])
		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
	return page;
}

/**
 * 	alloc_page_vma	- Allocate a page for a VMA.
 *
 * 	@gfp:
 *      %GFP_USER    user allocation.
 *      %GFP_KERNEL  kernel allocations,
 *      %GFP_HIGHMEM highmem/user allocations,
 *      %GFP_FS      allocation should not call back into a file system.
 *      %GFP_ATOMIC  don't sleep.
 *
 * 	@vma:  Pointer to VMA or NULL if not available.
 *	@addr: Virtual Address of the allocation. Must be inside the VMA.
 *
 * 	This function allocates a page from the kernel page pool and applies
 *	a NUMA policy associated with the VMA or the current process.
 *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
 *	mm_struct of the VMA to prevent it from going away. Should be used for
 *	all allocations for pages that will be mapped into
 * 	user space. Returns NULL when no page can be allocated.
 *
 *	Should be called with the mmap_sem of the vma held.
 */
struct page *
alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);
	struct zonelist *zl;

	cpuset_update_task_memory_state();

	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
		return alloc_page_interleave(gfp, 0, nid);
	}
	zl = zonelist_policy(gfp, pol);
	if (pol != &default_policy && pol != current->mempolicy) {
		/*
		 * slow path: ref counted policy -- shared or vma
		 */
		struct page *page = __alloc_pages(gfp, 0, zl);
		__mpol_free(pol);
		return page;
	}
	/*
	 * fast path: default or task policy
	 */
	return __alloc_pages(gfp, 0, zl);
}
/**
 * 	alloc_pages_current - Allocate pages.
 *
 *	@gfp:
 *		%GFP_USER   user allocation,
 *      	%GFP_KERNEL kernel allocation,
 *      	%GFP_HIGHMEM highmem allocation,
 *      	%GFP_FS     don't call back into a file system.
 *      	%GFP_ATOMIC don't sleep.
 *	@order: Power of two of allocation size in pages. 0 is a single page.
 *
 *	Allocate a page from the kernel page pool.  When not in interrupt
 *	context, apply the current process NUMA policy.
 *	Returns NULL when no page can be allocated.
 *
 *	Don't call cpuset_update_task_memory_state() unless
 *	1) it's ok to take cpuset_sem (can WAIT), and
 *	2) allocating for current task (not interrupt).
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = current->mempolicy;

	if ((gfp & __GFP_WAIT) && !in_interrupt())
		cpuset_update_task_memory_state();
	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
		pol = &default_policy;
	if (pol->policy == MPOL_INTERLEAVE)
		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
	return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
}
EXPORT_SYMBOL(alloc_pages_current);
/*
 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset relative after its cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 */
void *cpuset_being_rebound;

/* Slow path of a mempolicy copy */
struct mempolicy *__mpol_copy(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);
	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		mpol_rebind_policy(old, &mems);
	}
	*new = *old;
	atomic_set(&new->refcnt, 1);
	if (new->policy == MPOL_BIND) {
		int sz = ksize(old->v.zonelist);
		new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL);
		if (!new->v.zonelist) {
			kmem_cache_free(policy_cache, new);
			return ERR_PTR(-ENOMEM);
		}
	}
	return new;
}
/* Slow path of a mempolicy comparison */
int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return 0;
	if (a->policy != b->policy)
		return 0;
	switch (a->policy) {
	case MPOL_DEFAULT:
		return 1;
	case MPOL_INTERLEAVE:
		return nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		return a->v.preferred_node == b->v.preferred_node;
	case MPOL_BIND: {
		int i;
		for (i = 0; a->v.zonelist->zones[i]; i++)
			if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
				return 0;
		return b->v.zonelist->zones[i] == NULL;
	}
	default:
		BUG();
		return 0;
	}
}

/* Slow path of a mpol destructor. */
void __mpol_free(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	if (p->policy == MPOL_BIND)
		kfree(p->v.zonelist);
	p->policy = MPOL_DEFAULT;
	kmem_cache_free(policy_cache, p);
}
/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in Red-Black tree linked from the inode.
 * They are protected by the sp->lock spinlock, which should be held
 * for any accesses to the tree.
 */

/* lookup first element intersecting start-end */
/* Caller holds sp->lock */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
	struct rb_node *n = sp->root.rb_node;

	while (n) {
		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
			n = n->rb_right;
		else if (end <= p->start)
			n = n->rb_left;
		else
			break;
	}
	if (!n)
		return NULL;
	for (;;) {
		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
		if (!prev)
			break;
		w = rb_entry(prev, struct sp_node, nd);
		if (w->end <= start)
			break;
		n = prev;
	}
	return rb_entry(n, struct sp_node, nd);
}

/* Insert a new shared policy into the list. */
/* Caller holds sp->lock */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;
	struct sp_node *nd;

	while (*p) {
		parent = *p;
		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
			p = &(*p)->rb_left;
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ? new->policy->policy : 0);
}

/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = NULL;
	struct sp_node *sn;

	if (!sp->root.rb_node)
		return NULL;
	spin_lock(&sp->lock);
	sn = sp_lookup(sp, idx, idx+1);
	if (sn) {
		mpol_get(sn->policy);
		pol = sn->policy;
	}
	spin_unlock(&sp->lock);
	return pol;
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	pr_debug("deleting %lx-l%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	mpol_free(n->policy);
	kmem_cache_free(sn_cache, n);
}

static struct sp_node *
sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol)
{
	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);

	if (!n)
		return NULL;
	n->start = start;
	n->end = end;
	mpol_get(pol);
	n->policy = pol;
	return n;
}
/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
{
	struct sp_node *n, *new2 = NULL;

restart:
	spin_lock(&sp->lock);
	n = sp_lookup(sp, start, end);
	/* Take care of old policies in the same range. */
	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			if (n->end <= end)
				sp_delete(sp, n);
			else
				n->start = end;
		} else {
			/* Old policy spanning whole new range. */
			if (n->end > end) {
				if (!new2) {
					spin_unlock(&sp->lock);
					new2 = sp_alloc(end, n->end, n->policy);
					if (!new2)
						return -ENOMEM;
					goto restart;
				}
				n->end = start;
				sp_insert(sp, new2);
				new2 = NULL;
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
	spin_unlock(&sp->lock);
	if (new2) {
		mpol_free(new2->policy);
		kmem_cache_free(sn_cache, new2);
	}
	return 0;
}

void mpol_shared_policy_init(struct shared_policy *info, int policy,
				nodemask_t *policy_nodes)
{
	info->root = RB_ROOT;
	spin_lock_init(&info->lock);

	if (policy != MPOL_DEFAULT) {
		struct mempolicy *newpol;

		/* Falls back to MPOL_DEFAULT on any error */
		newpol = mpol_new(policy, policy_nodes);
		if (!IS_ERR(newpol)) {
			/* Create pseudo-vma that contains just the policy */
			struct vm_area_struct pvma;

			memset(&pvma, 0, sizeof(struct vm_area_struct));
			/* Policy covers entire file */
			pvma.vm_end = TASK_SIZE;
			mpol_set_shared_policy(info, &pvma, newpol);
			mpol_free(newpol);
		}
	}
}

int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	pr_debug("set_shared_policy %lx sz %lu %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->policy : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : -1);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		kmem_cache_free(sn_cache, new);
	return err;
}
/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	spin_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		rb_erase(&n->nd, &p->root);
		mpol_free(n->policy);
		kmem_cache_free(sn_cache, n);
	}
	spin_unlock(&p->lock);
}

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

	/*
	 * Set interleaving policy for system init. Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB), or
	 * fall back to the largest node if they're all smaller.
	 */
	nodes_clear(interleave_nodes);
	for_each_online_node(nid) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		/* Interleave this node? */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
		printk("numa_policy_init: interleaving failed\n");
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, NULL);
}
/* Migrate a policy to a different set of nodes */
void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	nodemask_t *mpolmask;
	nodemask_t tmp;

	if (!pol)
		return;
	mpolmask = &pol->cpuset_mems_allowed;
	if (nodes_equal(*mpolmask, *newmask))
		return;

	switch (pol->policy) {
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
		pol->v.nodes = tmp;
		*mpolmask = *newmask;
		current->il_next = node_remap(current->il_next,
						*mpolmask, *newmask);
		break;
	case MPOL_PREFERRED:
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						*mpolmask, *newmask);
		*mpolmask = *newmask;
		break;
	case MPOL_BIND: {
		nodemask_t nodes;
		struct zone **z;
		struct zonelist *zonelist;

		nodes_clear(nodes);
		for (z = pol->v.zonelist->zones; *z; z++)
			node_set(zone_to_nid(*z), nodes);
		nodes_remap(tmp, nodes, *mpolmask, *newmask);
		nodes = tmp;

		zonelist = bind_zonelist(&nodes);

		/* If no mem, then zonelist is NULL and we keep old zonelist.
		 * If that old zonelist has no remaining mems_allowed nodes,
		 * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
		 */

		if (!IS_ERR(zonelist)) {
			/* Good - got mem - substitute new zonelist */
			kfree(pol->v.zonelist);
			pol->v.zonelist = zonelist;
		}
		*mpolmask = *newmask;
		break;
	}
	default:
		BUG();
		break;
	}
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}
/*
 * Display pages allocated per node and memory policy via /proc.
 */

static const char * const policy_types[] =
	{ "default", "prefer", "bind", "interleave" };

/*
 * Convert a mempolicy into a string.
 * Returns the number of characters in buffer (if positive)
 * or an error (negative)
 */
static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	char *p = buffer;
	int l;
	nodemask_t nodes;
	int mode = pol ? pol->policy : MPOL_DEFAULT;

	switch (mode) {
	case MPOL_DEFAULT:
		nodes_clear(nodes);
		break;

	case MPOL_PREFERRED:
		nodes_clear(nodes);
		node_set(pol->v.preferred_node, nodes);
		break;

	case MPOL_BIND:
		get_zonemask(pol, &nodes);
		break;

	case MPOL_INTERLEAVE:
		nodes = pol->v.nodes;
		break;

	default:
		BUG();
		return -EFAULT;
	}

	l = strlen(policy_types[mode]);
	if (buffer + maxlen < p + l + 1)
		return -ENOSPC;

	strcpy(p, policy_types[mode]);
	p += l;

	if (!nodes_empty(nodes)) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = '=';
		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
	}
	return p - buffer;
}
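/*
 * Illustrative examples of the strings produced above (not part of the
 * original file): "default", "prefer=2", "bind=0-1", "interleave=0-3".
 * These are the policy fields shown per mapping in /proc/<pid>/numa_maps
 * by show_numa_map() below.
 */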
struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

static void gather_stats(struct page *page, void *private, int pte_dirty)
{
	struct numa_maps *md = private;
	int count = page_mapcount(page);

	md->pages++;
	if (pte_dirty || PageDirty(page))
		md->dirty++;

	if (PageSwapCache(page))
		md->swapcache++;

	if (PageActive(page))
		md->active++;

	if (PageWriteback(page))
		md->writeback++;

	if (PageAnon(page))
		md->anon++;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)]++;
}

#ifdef CONFIG_HUGETLB_PAGE
static void check_huge_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct numa_maps *md)
{
	unsigned long addr;
	struct page *page;

	for (addr = start; addr < end; addr += HPAGE_SIZE) {
		pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
		pte_t pte;

		if (!ptep)
			continue;

		pte = *ptep;
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (!page)
			continue;

		gather_stats(page, md, pte_dirty(*ptep));
	}
}
#else
static inline void check_huge_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct numa_maps *md)
{
}
#endif
int show_numa_map(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct numa_maps *md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
	if (!md)
		return 0;

	pol = get_vma_policy(priv->task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol);
	/*
	 * unref shared or other task's mempolicy
	 */
	if (pol != &default_policy && pol != current->mempolicy)
		__mpol_free(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else if (vma->vm_start <= mm->start_stack &&
			vma->vm_end >= mm->start_stack) {
		seq_printf(m, " stack");
	}

	if (is_vm_hugetlb_page(vma)) {
		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
		seq_printf(m, " huge");
	} else {
		check_pgd_range(vma, vma->vm_start, vma->vm_end,
				&node_online_map, MPOL_MF_STATS, md);
	}

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_online_node(n)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');
	kfree(md);

	if (m->count < m->size)
		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}