[linux-2.6/kvm.git] / mm / mempolicy.c
1 /*
2 * Simple NUMA memory policy for the Linux kernel.
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
8 * NUMA policy allows the user to give hints in which node(s) memory should
9 * be allocated.
11 * Support four policies per VMA and per process:
13 * The VMA policy has priority over the process policy for a page fault.
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19  *                offset into the backing object or offset into the mapping
20  *                for anonymous memory. For process policy a process counter
21  *                is used.
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
28 * preferred Try a specific node first before normal fallback.
29 * As a special case node -1 here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49  * on systems with highmem, kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
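/*
 * Illustrative userspace sketch (not part of this file): how the policies
 * described above are requested through the system calls implemented below.
 * A minimal sketch assuming the set_mempolicy()/mbind() wrappers from
 * libnuma's <numaif.h> (link with -lnuma); the helper name is arbitrary and
 * error handling is abbreviated.
 */
#include <numaif.h>
#include <sys/mman.h>
#include <stdio.h>

static void mempolicy_example(void)
{
	unsigned long nodes = 0x3;		/* nodemask: nodes 0 and 1 */
	size_t len = 16 * 4096;
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* Process policy: interleave new allocations across nodes 0-1. */
	if (set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes)))
		perror("set_mempolicy");

	/* VMA policy: restrict this mapping to nodes 0-1, no fallback. */
	if (mbind(buf, len, MPOL_BIND, &nodes, 8 * sizeof(nodes), 0))
		perror("mbind");

	/* Back to the default local-node policy for the process. */
	set_mempolicy(MPOL_DEFAULT, NULL, 0);
}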
56 /* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always grateful with that.
66 could replace all the switch()es with a mempolicy_ops structure.
69 #include <linux/mempolicy.h>
70 #include <linux/mm.h>
71 #include <linux/highmem.h>
72 #include <linux/hugetlb.h>
73 #include <linux/kernel.h>
74 #include <linux/sched.h>
76 #include <linux/nodemask.h>
77 #include <linux/cpuset.h>
78 #include <linux/gfp.h>
79 #include <linux/slab.h>
80 #include <linux/string.h>
81 #include <linux/module.h>
82 #include <linux/interrupt.h>
83 #include <linux/init.h>
84 #include <linux/compat.h>
86 #include <linux/swap.h>
87 #include <linux/seq_file.h>
88 #include <linux/proc_fs.h>
89 #include <linux/migrate.h>
90 #include <linux/rmap.h>
91 #include <linux/security.h>
93 #include <asm/tlbflush.h>
94 #include <asm/uaccess.h>
96 /* Internal flags */
97 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
98 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
99 #define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */
101 static struct kmem_cache *policy_cache;
102 static struct kmem_cache *sn_cache;
104 /* Highest zone. A specific allocation for a zone below that is not
105 policied. */
106 enum zone_type policy_zone = 0;
108 struct mempolicy default_policy = {
109 .refcnt = ATOMIC_INIT(1), /* never free it */
110 .policy = MPOL_DEFAULT,
113 /* Do sanity checking on a policy */
114 static int mpol_check_policy(int mode, nodemask_t *nodes)
116 int empty = nodes_empty(*nodes);
118 switch (mode) {
119 case MPOL_DEFAULT:
120 if (!empty)
121 return -EINVAL;
122 break;
123 case MPOL_BIND:
124 case MPOL_INTERLEAVE:
125 /* Preferred will only use the first bit, but allow
126 more for now. */
127 if (empty)
128 return -EINVAL;
129 break;
131 return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
134 /* Generate a custom zonelist for the BIND policy. */
135 static struct zonelist *bind_zonelist(nodemask_t *nodes)
137 struct zonelist *zl;
138 int num, max, nd;
139 enum zone_type k;
141 max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
142 max++; /* space for zlcache_ptr (see mmzone.h) */
143 zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
144 if (!zl)
145 return ERR_PTR(-ENOMEM);
146 zl->zlcache_ptr = NULL;
147 num = 0;
148 /* First put in the highest zones from all nodes, then all the next
149 lower zones etc. Avoid empty zones because the memory allocator
150 doesn't like them. If you implement node hot removal you
151 have to fix that. */
152 k = MAX_NR_ZONES - 1;
153 while (1) {
154 for_each_node_mask(nd, *nodes) {
155 struct zone *z = &NODE_DATA(nd)->node_zones[k];
156 if (z->present_pages > 0)
157 zl->zones[num++] = z;
159 if (k == 0)
160 break;
161 k--;
163 if (num == 0) {
164 kfree(zl);
165 return ERR_PTR(-EINVAL);
167 zl->zones[num] = NULL;
168 return zl;
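/*
 * Worked example (illustrative): for a bind nodemask of {0,2} on a system
 * where only ZONE_DMA and ZONE_NORMAL have present pages, the loop above
 * produces, highest zone first across all nodes:
 *
 *	zones[] = { node0/Normal, node2/Normal, node0/DMA, node2/DMA, NULL }
 */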
171 /* Create a new policy */
172 static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
174 struct mempolicy *policy;
176 pr_debug("setting mode %d nodes[0] %lx\n",
177 mode, nodes ? nodes_addr(*nodes)[0] : -1);
179 if (mode == MPOL_DEFAULT)
180 return NULL;
181 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
182 if (!policy)
183 return ERR_PTR(-ENOMEM);
184 atomic_set(&policy->refcnt, 1);
185 switch (mode) {
186 case MPOL_INTERLEAVE:
187 policy->v.nodes = *nodes;
188 if (nodes_weight(*nodes) == 0) {
189 kmem_cache_free(policy_cache, policy);
190 return ERR_PTR(-EINVAL);
192 break;
193 case MPOL_PREFERRED:
194 policy->v.preferred_node = first_node(*nodes);
195 if (policy->v.preferred_node >= MAX_NUMNODES)
196 policy->v.preferred_node = -1;
197 break;
198 case MPOL_BIND:
199 policy->v.zonelist = bind_zonelist(nodes);
200 if (IS_ERR(policy->v.zonelist)) {
201 void *error_code = policy->v.zonelist;
202 kmem_cache_free(policy_cache, policy);
203 return error_code;
205 break;
207 policy->policy = mode;
208 policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
209 return policy;
212 static void gather_stats(struct page *, void *, int pte_dirty);
213 static void migrate_page_add(struct page *page, struct list_head *pagelist,
214 unsigned long flags);
216 /* Scan through pages checking if pages follow certain conditions. */
217 static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
218 unsigned long addr, unsigned long end,
219 const nodemask_t *nodes, unsigned long flags,
220 void *private)
222 pte_t *orig_pte;
223 pte_t *pte;
224 spinlock_t *ptl;
226 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
227 do {
228 struct page *page;
229 int nid;
231 if (!pte_present(*pte))
232 continue;
233 page = vm_normal_page(vma, addr, *pte);
234 if (!page)
235 continue;
237 * The check for PageReserved here is important to avoid
238 * handling zero pages and other pages that may have been
239 * marked special by the system.
241 * If the PageReserved would not be checked here then f.e.
242 * the location of the zero page could have an influence
243 * on MPOL_MF_STRICT, zero pages would be counted for
244 * the per node stats, and there would be useless attempts
245 * to put zero pages on the migration list.
247 if (PageReserved(page))
248 continue;
249 nid = page_to_nid(page);
250 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
251 continue;
253 if (flags & MPOL_MF_STATS)
254 gather_stats(page, private, pte_dirty(*pte));
255 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
256 migrate_page_add(page, private, flags);
257 else
258 break;
259 } while (pte++, addr += PAGE_SIZE, addr != end);
260 pte_unmap_unlock(orig_pte, ptl);
261 return addr != end;
264 static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
265 unsigned long addr, unsigned long end,
266 const nodemask_t *nodes, unsigned long flags,
267 void *private)
269 pmd_t *pmd;
270 unsigned long next;
272 pmd = pmd_offset(pud, addr);
273 do {
274 next = pmd_addr_end(addr, end);
275 if (pmd_none_or_clear_bad(pmd))
276 continue;
277 if (check_pte_range(vma, pmd, addr, next, nodes,
278 flags, private))
279 return -EIO;
280 } while (pmd++, addr = next, addr != end);
281 return 0;
284 static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
285 unsigned long addr, unsigned long end,
286 const nodemask_t *nodes, unsigned long flags,
287 void *private)
289 pud_t *pud;
290 unsigned long next;
292 pud = pud_offset(pgd, addr);
293 do {
294 next = pud_addr_end(addr, end);
295 if (pud_none_or_clear_bad(pud))
296 continue;
297 if (check_pmd_range(vma, pud, addr, next, nodes,
298 flags, private))
299 return -EIO;
300 } while (pud++, addr = next, addr != end);
301 return 0;
304 static inline int check_pgd_range(struct vm_area_struct *vma,
305 unsigned long addr, unsigned long end,
306 const nodemask_t *nodes, unsigned long flags,
307 void *private)
309 pgd_t *pgd;
310 unsigned long next;
312 pgd = pgd_offset(vma->vm_mm, addr);
313 do {
314 next = pgd_addr_end(addr, end);
315 if (pgd_none_or_clear_bad(pgd))
316 continue;
317 if (check_pud_range(vma, pgd, addr, next, nodes,
318 flags, private))
319 return -EIO;
320 } while (pgd++, addr = next, addr != end);
321 return 0;
325 * Check if all pages in a range are on a set of nodes.
326 * If pagelist != NULL then isolate pages from the LRU and
327 * put them on the pagelist.
329 static struct vm_area_struct *
330 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
331 const nodemask_t *nodes, unsigned long flags, void *private)
333 int err;
334 struct vm_area_struct *first, *vma, *prev;
336 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
338 err = migrate_prep();
339 if (err)
340 return ERR_PTR(err);
343 first = find_vma(mm, start);
344 if (!first)
345 return ERR_PTR(-EFAULT);
346 prev = NULL;
347 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
348 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
349 if (!vma->vm_next && vma->vm_end < end)
350 return ERR_PTR(-EFAULT);
351 if (prev && prev->vm_end < vma->vm_start)
352 return ERR_PTR(-EFAULT);
354 if (!is_vm_hugetlb_page(vma) &&
355 ((flags & MPOL_MF_STRICT) ||
356 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
357 vma_migratable(vma)))) {
358 unsigned long endvma = vma->vm_end;
360 if (endvma > end)
361 endvma = end;
362 if (vma->vm_start > start)
363 start = vma->vm_start;
364 err = check_pgd_range(vma, start, endvma, nodes,
365 flags, private);
366 if (err) {
367 first = ERR_PTR(err);
368 break;
371 prev = vma;
373 return first;
376 /* Apply policy to a single VMA */
377 static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
379 int err = 0;
380 struct mempolicy *old = vma->vm_policy;
382 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
383 vma->vm_start, vma->vm_end, vma->vm_pgoff,
384 vma->vm_ops, vma->vm_file,
385 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
387 if (vma->vm_ops && vma->vm_ops->set_policy)
388 err = vma->vm_ops->set_policy(vma, new);
389 if (!err) {
390 mpol_get(new);
391 vma->vm_policy = new;
392 mpol_free(old);
394 return err;
397 /* Step 2: apply policy to a range and do splits. */
398 static int mbind_range(struct vm_area_struct *vma, unsigned long start,
399 unsigned long end, struct mempolicy *new)
401 struct vm_area_struct *next;
402 int err;
404 err = 0;
405 for (; vma && vma->vm_start < end; vma = next) {
406 next = vma->vm_next;
407 if (vma->vm_start < start)
408 err = split_vma(vma->vm_mm, vma, start, 1);
409 if (!err && vma->vm_end > end)
410 err = split_vma(vma->vm_mm, vma, end, 0);
411 if (!err)
412 err = policy_vma(vma, new);
413 if (err)
414 break;
416 return err;
419 static int contextualize_policy(int mode, nodemask_t *nodes)
421 if (!nodes)
422 return 0;
424 cpuset_update_task_memory_state();
425 if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
426 return -EINVAL;
427 return mpol_check_policy(mode, nodes);
432 * Update task->flags PF_MEMPOLICY bit: set iff non-default
433 * mempolicy. Allows more rapid checking of this (combined perhaps
434 * with other PF_* flag bits) on memory allocation hot code paths.
436 * If called from outside this file, the task 'p' should -only- be
437 * a newly forked child not yet visible on the task list, because
438 * manipulating the task flags of a visible task is not safe.
440 * The above limitation is why this routine has the funny name
441 * mpol_fix_fork_child_flag().
443 * It is also safe to call this with a task pointer of current,
444 * which the static wrapper mpol_set_task_struct_flag() does,
445 * for use within this file.
448 void mpol_fix_fork_child_flag(struct task_struct *p)
450 if (p->mempolicy)
451 p->flags |= PF_MEMPOLICY;
452 else
453 p->flags &= ~PF_MEMPOLICY;
456 static void mpol_set_task_struct_flag(void)
458 mpol_fix_fork_child_flag(current);
461 /* Set the process memory policy */
462 long do_set_mempolicy(int mode, nodemask_t *nodes)
464 struct mempolicy *new;
466 if (contextualize_policy(mode, nodes))
467 return -EINVAL;
468 new = mpol_new(mode, nodes);
469 if (IS_ERR(new))
470 return PTR_ERR(new);
471 mpol_free(current->mempolicy);
472 current->mempolicy = new;
473 mpol_set_task_struct_flag();
474 if (new && new->policy == MPOL_INTERLEAVE)
475 current->il_next = first_node(new->v.nodes);
476 return 0;
479 /* Fill a zone bitmap for a policy */
480 static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
482 int i;
484 nodes_clear(*nodes);
485 switch (p->policy) {
486 case MPOL_BIND:
487 for (i = 0; p->v.zonelist->zones[i]; i++)
488 node_set(zone_to_nid(p->v.zonelist->zones[i]),
489 *nodes);
490 break;
491 case MPOL_DEFAULT:
492 break;
493 case MPOL_INTERLEAVE:
494 *nodes = p->v.nodes;
495 break;
496 case MPOL_PREFERRED:
497 /* or use current node instead of online map? */
498 if (p->v.preferred_node < 0)
499 *nodes = node_online_map;
500 else
501 node_set(p->v.preferred_node, *nodes);
502 break;
503 default:
504 BUG();
508 static int lookup_node(struct mm_struct *mm, unsigned long addr)
510 struct page *p;
511 int err;
513 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
514 if (err >= 0) {
515 err = page_to_nid(p);
516 put_page(p);
518 return err;
521 /* Retrieve NUMA policy */
522 long do_get_mempolicy(int *policy, nodemask_t *nmask,
523 unsigned long addr, unsigned long flags)
525 int err;
526 struct mm_struct *mm = current->mm;
527 struct vm_area_struct *vma = NULL;
528 struct mempolicy *pol = current->mempolicy;
530 cpuset_update_task_memory_state();
531 if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
532 return -EINVAL;
533 if (flags & MPOL_F_ADDR) {
534 down_read(&mm->mmap_sem);
535 vma = find_vma_intersection(mm, addr, addr+1);
536 if (!vma) {
537 up_read(&mm->mmap_sem);
538 return -EFAULT;
540 if (vma->vm_ops && vma->vm_ops->get_policy)
541 pol = vma->vm_ops->get_policy(vma, addr);
542 else
543 pol = vma->vm_policy;
544 } else if (addr)
545 return -EINVAL;
547 if (!pol)
548 pol = &default_policy;
550 if (flags & MPOL_F_NODE) {
551 if (flags & MPOL_F_ADDR) {
552 err = lookup_node(mm, addr);
553 if (err < 0)
554 goto out;
555 *policy = err;
556 } else if (pol == current->mempolicy &&
557 pol->policy == MPOL_INTERLEAVE) {
558 *policy = current->il_next;
559 } else {
560 err = -EINVAL;
561 goto out;
563 } else
564 *policy = pol->policy;
566 if (vma) {
567 up_read(&current->mm->mmap_sem);
568 vma = NULL;
571 err = 0;
572 if (nmask)
573 get_zonemask(pol, nmask);
575 out:
576 if (vma)
577 up_read(&current->mm->mmap_sem);
578 return err;
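/*
 * Illustrative userspace sketch (not part of this file): querying the
 * policy of a VMA and the node backing a particular page, exercising the
 * MPOL_F_ADDR and MPOL_F_NODE paths handled above.  A minimal sketch
 * assuming libnuma's <numaif.h> get_mempolicy() wrapper; the helper name
 * is arbitrary.
 */
#include <numaif.h>
#include <stdio.h>

static void query_example(void *addr)
{
	int mode = -1, node = -1;

	/* Which policy applies to the VMA containing addr? */
	if (get_mempolicy(&mode, NULL, 0, addr, MPOL_F_ADDR) == 0)
		printf("vma policy mode: %d\n", mode);

	/* Which node currently backs the page at addr? */
	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == 0)
		printf("page at %p is on node %d\n", addr, node);
}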
581 #ifdef CONFIG_MIGRATION
583 * page migration
585 static void migrate_page_add(struct page *page, struct list_head *pagelist,
586 unsigned long flags)
589 * Avoid migrating a page that is shared with others.
591 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
592 isolate_lru_page(page, pagelist);
595 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
597 return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
601 * Migrate pages from one node to a target node.
602 * Returns error or the number of pages not migrated.
604 int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
606 nodemask_t nmask;
607 LIST_HEAD(pagelist);
608 int err = 0;
610 nodes_clear(nmask);
611 node_set(source, nmask);
613 check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
614 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
616 if (!list_empty(&pagelist))
617 err = migrate_pages(&pagelist, new_node_page, dest);
619 return err;
623 * Move pages between the two nodesets so as to preserve the physical
624 * layout as much as possible.
626  * Returns the number of pages that could not be moved.
628 int do_migrate_pages(struct mm_struct *mm,
629 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
631 LIST_HEAD(pagelist);
632 int busy = 0;
633 int err = 0;
634 nodemask_t tmp;
636 down_read(&mm->mmap_sem);
638 err = migrate_vmas(mm, from_nodes, to_nodes, flags);
639 if (err)
640 goto out;
643 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
644 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
645 * bit in 'tmp', and return that <source, dest> pair for migration.
646 * The pair of nodemasks 'to' and 'from' define the map.
648 * If no pair of bits is found that way, fallback to picking some
649 * pair of 'source' and 'dest' bits that are not the same. If the
650 * 'source' and 'dest' bits are the same, this represents a node
651 * that will be migrating to itself, so no pages need move.
653 * If no bits are left in 'tmp', or if all remaining bits left
654 * in 'tmp' correspond to the same bit in 'to', return false
655 * (nothing left to migrate).
657 * This lets us pick a pair of nodes to migrate between, such that
658 * if possible the dest node is not already occupied by some other
659 * source node, minimizing the risk of overloading the memory on a
660 * node that would happen if we migrated incoming memory to a node
661  * before migrating outgoing memory sourced from that same node.
663 * A single scan of tmp is sufficient. As we go, we remember the
664 * most recent <s, d> pair that moved (s != d). If we find a pair
665 * that not only moved, but what's better, moved to an empty slot
666 * (d is not set in tmp), then we break out then, with that pair.
667  * Otherwise when we finish scanning tmp, we at least have the
668 * most recent <s, d> pair that moved. If we get all the way through
669 * the scan of tmp without finding any node that moved, much less
670 * moved to an empty node, then there is nothing left worth migrating.
673 tmp = *from_nodes;
674 while (!nodes_empty(tmp)) {
675 int s,d;
676 int source = -1;
677 int dest = 0;
679 for_each_node_mask(s, tmp) {
680 d = node_remap(s, *from_nodes, *to_nodes);
681 if (s == d)
682 continue;
684 source = s; /* Node moved. Memorize */
685 dest = d;
687 /* dest not in remaining from nodes? */
688 if (!node_isset(dest, tmp))
689 break;
691 if (source == -1)
692 break;
694 node_clear(source, tmp);
695 err = migrate_to_node(mm, source, dest, flags);
696 if (err > 0)
697 busy += err;
698 if (err < 0)
699 break;
701 out:
702 up_read(&mm->mmap_sem);
703 if (err < 0)
704 return err;
705 return busy;
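/*
 * Worked example (illustrative): from_nodes = {0,1}, to_nodes = {1,2},
 * so node_remap() maps 0 -> 1 and 1 -> 2.  On the first pass over
 * tmp = {0,1} the pair <0,1> moves, but its destination 1 is still a
 * source, so the scan continues and settles on <1,2>, whose destination
 * lies outside tmp.  Node 1 is therefore emptied onto node 2 first and
 * cleared from tmp; the second pass then picks <0,1>, so node 0's pages
 * land on node 1 only after node 1 has been drained, preserving the
 * relative layout.
 */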
709 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
711 struct vm_area_struct *vma = (struct vm_area_struct *)private;
713 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
714 page_address_in_vma(page, vma));
716 #else
718 static void migrate_page_add(struct page *page, struct list_head *pagelist,
719 unsigned long flags)
723 int do_migrate_pages(struct mm_struct *mm,
724 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
726 return -ENOSYS;
729 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
731 return NULL;
733 #endif
735 long do_mbind(unsigned long start, unsigned long len,
736 unsigned long mode, nodemask_t *nmask, unsigned long flags)
738 struct vm_area_struct *vma;
739 struct mm_struct *mm = current->mm;
740 struct mempolicy *new;
741 unsigned long end;
742 int err;
743 LIST_HEAD(pagelist);
745 if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
746 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
747 || mode > MPOL_MAX)
748 return -EINVAL;
749 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
750 return -EPERM;
752 if (start & ~PAGE_MASK)
753 return -EINVAL;
755 if (mode == MPOL_DEFAULT)
756 flags &= ~MPOL_MF_STRICT;
758 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
759 end = start + len;
761 if (end < start)
762 return -EINVAL;
763 if (end == start)
764 return 0;
766 if (mpol_check_policy(mode, nmask))
767 return -EINVAL;
769 new = mpol_new(mode, nmask);
770 if (IS_ERR(new))
771 return PTR_ERR(new);
774 * If we are using the default policy then operation
775 * on discontinuous address spaces is okay after all
777 if (!new)
778 flags |= MPOL_MF_DISCONTIG_OK;
780 pr_debug("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
781 mode, nmask ? nodes_addr(*nmask)[0] : -1);
783 down_write(&mm->mmap_sem);
784 vma = check_range(mm, start, end, nmask,
785 flags | MPOL_MF_INVERT, &pagelist);
787 err = PTR_ERR(vma);
788 if (!IS_ERR(vma)) {
789 int nr_failed = 0;
791 err = mbind_range(vma, start, end, new);
793 if (!list_empty(&pagelist))
794 nr_failed = migrate_pages(&pagelist, new_vma_page,
795 (unsigned long)vma);
797 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
798 err = -EIO;
801 up_write(&mm->mmap_sem);
802 mpol_free(new);
803 return err;
807 * User space interface with variable sized bitmaps for nodelists.
810 /* Copy a node mask from user space. */
811 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
812 unsigned long maxnode)
814 unsigned long k;
815 unsigned long nlongs;
816 unsigned long endmask;
818 --maxnode;
819 nodes_clear(*nodes);
820 if (maxnode == 0 || !nmask)
821 return 0;
822 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
823 return -EINVAL;
825 nlongs = BITS_TO_LONGS(maxnode);
826 if ((maxnode % BITS_PER_LONG) == 0)
827 endmask = ~0UL;
828 else
829 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
831 	/* When the user specifies more nodes than supported, just check
832 	   that the non-supported part is all zero. */
833 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
834 if (nlongs > PAGE_SIZE/sizeof(long))
835 return -EINVAL;
836 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
837 unsigned long t;
838 if (get_user(t, nmask + k))
839 return -EFAULT;
840 if (k == nlongs - 1) {
841 if (t & endmask)
842 return -EINVAL;
843 } else if (t)
844 return -EINVAL;
846 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
847 endmask = ~0UL;
850 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
851 return -EFAULT;
852 nodes_addr(*nodes)[nlongs-1] &= endmask;
853 return 0;
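/*
 * Worked example (illustrative): on a 64-bit kernel a caller passing
 * maxnode = 17 describes bits 0-15 after the --maxnode above, so
 * nlongs = 1 and endmask = 0xffff; any stray bits above node 15 in the
 * copied word are masked off.  If the caller hands in more longs than
 * MAX_NUMNODES requires, the excess longs are only checked to be zero.
 */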
856 /* Copy a kernel node mask to user space */
857 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
858 nodemask_t *nodes)
860 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
861 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
863 if (copy > nbytes) {
864 if (copy > PAGE_SIZE)
865 return -EINVAL;
866 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
867 return -EFAULT;
868 copy = nbytes;
870 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
873 asmlinkage long sys_mbind(unsigned long start, unsigned long len,
874 unsigned long mode,
875 unsigned long __user *nmask, unsigned long maxnode,
876 unsigned flags)
878 nodemask_t nodes;
879 int err;
881 err = get_nodes(&nodes, nmask, maxnode);
882 if (err)
883 return err;
884 #ifdef CONFIG_CPUSETS
885 /* Restrict the nodes to the allowed nodes in the cpuset */
886 nodes_and(nodes, nodes, current->mems_allowed);
887 #endif
888 return do_mbind(start, len, mode, &nodes, flags);
891 /* Set the process memory policy */
892 asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
893 unsigned long maxnode)
895 int err;
896 nodemask_t nodes;
898 if (mode < 0 || mode > MPOL_MAX)
899 return -EINVAL;
900 err = get_nodes(&nodes, nmask, maxnode);
901 if (err)
902 return err;
903 return do_set_mempolicy(mode, &nodes);
906 asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
907 const unsigned long __user *old_nodes,
908 const unsigned long __user *new_nodes)
910 struct mm_struct *mm;
911 struct task_struct *task;
912 nodemask_t old;
913 nodemask_t new;
914 nodemask_t task_nodes;
915 int err;
917 err = get_nodes(&old, old_nodes, maxnode);
918 if (err)
919 return err;
921 err = get_nodes(&new, new_nodes, maxnode);
922 if (err)
923 return err;
925 /* Find the mm_struct */
926 read_lock(&tasklist_lock);
927 task = pid ? find_task_by_pid(pid) : current;
928 if (!task) {
929 read_unlock(&tasklist_lock);
930 return -ESRCH;
932 mm = get_task_mm(task);
933 read_unlock(&tasklist_lock);
935 if (!mm)
936 return -EINVAL;
939 * Check if this process has the right to modify the specified
940 * process. The right exists if the process has administrative
941 * capabilities, superuser privileges or the same
942 * userid as the target process.
944 if ((current->euid != task->suid) && (current->euid != task->uid) &&
945 (current->uid != task->suid) && (current->uid != task->uid) &&
946 !capable(CAP_SYS_NICE)) {
947 err = -EPERM;
948 goto out;
951 task_nodes = cpuset_mems_allowed(task);
952 /* Is the user allowed to access the target nodes? */
953 if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
954 err = -EPERM;
955 goto out;
958 if (!nodes_subset(new, node_online_map)) {
959 err = -EINVAL;
960 goto out;
963 err = security_task_movememory(task);
964 if (err)
965 goto out;
967 err = do_migrate_pages(mm, &old, &new,
968 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
969 out:
970 mmput(mm);
971 return err;
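/*
 * Illustrative userspace sketch (not part of this file): asking the kernel
 * to move the calling process' pages from node 0 to node 1 via the
 * migrate_pages(2) system call implemented above.  A minimal sketch
 * assuming libnuma's <numaif.h> wrapper; pid 0 means the current process
 * and the helper name is arbitrary.
 */
#include <numaif.h>
#include <stdio.h>

static void migrate_example(void)
{
	unsigned long old_nodes = 1UL << 0;	/* source: node 0 */
	unsigned long new_nodes = 1UL << 1;	/* target: node 1 */
	long not_moved;

	not_moved = migrate_pages(0, 8 * sizeof(unsigned long),
				  &old_nodes, &new_nodes);
	if (not_moved < 0)
		perror("migrate_pages");
	else if (not_moved > 0)
		printf("%ld pages could not be moved\n", not_moved);
}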
975 /* Retrieve NUMA policy */
976 asmlinkage long sys_get_mempolicy(int __user *policy,
977 unsigned long __user *nmask,
978 unsigned long maxnode,
979 unsigned long addr, unsigned long flags)
981 int err, pval;
982 nodemask_t nodes;
984 if (nmask != NULL && maxnode < MAX_NUMNODES)
985 return -EINVAL;
987 err = do_get_mempolicy(&pval, &nodes, addr, flags);
989 if (err)
990 return err;
992 if (policy && put_user(pval, policy))
993 return -EFAULT;
995 if (nmask)
996 err = copy_nodes_to_user(nmask, maxnode, &nodes);
998 return err;
1001 #ifdef CONFIG_COMPAT
1003 asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1004 compat_ulong_t __user *nmask,
1005 compat_ulong_t maxnode,
1006 compat_ulong_t addr, compat_ulong_t flags)
1008 long err;
1009 unsigned long __user *nm = NULL;
1010 unsigned long nr_bits, alloc_size;
1011 DECLARE_BITMAP(bm, MAX_NUMNODES);
1013 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1014 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1016 if (nmask)
1017 nm = compat_alloc_user_space(alloc_size);
1019 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1021 if (!err && nmask) {
1022 err = copy_from_user(bm, nm, alloc_size);
1023 /* ensure entire bitmap is zeroed */
1024 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1025 err |= compat_put_bitmap(nmask, bm, nr_bits);
1028 return err;
1031 asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1032 compat_ulong_t maxnode)
1034 long err = 0;
1035 unsigned long __user *nm = NULL;
1036 unsigned long nr_bits, alloc_size;
1037 DECLARE_BITMAP(bm, MAX_NUMNODES);
1039 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1040 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1042 if (nmask) {
1043 err = compat_get_bitmap(bm, nmask, nr_bits);
1044 nm = compat_alloc_user_space(alloc_size);
1045 err |= copy_to_user(nm, bm, alloc_size);
1048 if (err)
1049 return -EFAULT;
1051 return sys_set_mempolicy(mode, nm, nr_bits+1);
1054 asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1055 compat_ulong_t mode, compat_ulong_t __user *nmask,
1056 compat_ulong_t maxnode, compat_ulong_t flags)
1058 long err = 0;
1059 unsigned long __user *nm = NULL;
1060 unsigned long nr_bits, alloc_size;
1061 nodemask_t bm;
1063 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1064 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1066 if (nmask) {
1067 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1068 nm = compat_alloc_user_space(alloc_size);
1069 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1072 if (err)
1073 return -EFAULT;
1075 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1078 #endif
1080 /* Return effective policy for a VMA */
1081 static struct mempolicy * get_vma_policy(struct task_struct *task,
1082 struct vm_area_struct *vma, unsigned long addr)
1084 struct mempolicy *pol = task->mempolicy;
1086 if (vma) {
1087 if (vma->vm_ops && vma->vm_ops->get_policy)
1088 pol = vma->vm_ops->get_policy(vma, addr);
1089 else if (vma->vm_policy &&
1090 vma->vm_policy->policy != MPOL_DEFAULT)
1091 pol = vma->vm_policy;
1093 if (!pol)
1094 pol = &default_policy;
1095 return pol;
1098 /* Return a zonelist representing a mempolicy */
1099 static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
1101 int nd;
1103 switch (policy->policy) {
1104 case MPOL_PREFERRED:
1105 nd = policy->v.preferred_node;
1106 if (nd < 0)
1107 nd = numa_node_id();
1108 break;
1109 case MPOL_BIND:
1110 /* Lower zones don't get a policy applied */
1111 /* Careful: current->mems_allowed might have moved */
1112 if (gfp_zone(gfp) >= policy_zone)
1113 if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
1114 return policy->v.zonelist;
1115 /*FALL THROUGH*/
1116 case MPOL_INTERLEAVE: /* should not happen */
1117 case MPOL_DEFAULT:
1118 nd = numa_node_id();
1119 break;
1120 default:
1121 nd = 0;
1122 BUG();
1124 return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
1127 /* Do dynamic interleaving for a process */
1128 static unsigned interleave_nodes(struct mempolicy *policy)
1130 unsigned nid, next;
1131 struct task_struct *me = current;
1133 nid = me->il_next;
1134 next = next_node(nid, policy->v.nodes);
1135 if (next >= MAX_NUMNODES)
1136 next = first_node(policy->v.nodes);
1137 me->il_next = next;
1138 return nid;
1142 * Depending on the memory policy provide a node from which to allocate the
1143 * next slab entry.
1145 unsigned slab_node(struct mempolicy *policy)
1147 int pol = policy ? policy->policy : MPOL_DEFAULT;
1149 switch (pol) {
1150 case MPOL_INTERLEAVE:
1151 return interleave_nodes(policy);
1153 case MPOL_BIND:
1155 * Follow bind policy behavior and start allocation at the
1156 * first node.
1158 return zone_to_nid(policy->v.zonelist->zones[0]);
1160 case MPOL_PREFERRED:
1161 if (policy->v.preferred_node >= 0)
1162 return policy->v.preferred_node;
1163 /* Fall through */
1165 default:
1166 return numa_node_id();
1170 /* Do static interleaving for a VMA with known offset. */
1171 static unsigned offset_il_node(struct mempolicy *pol,
1172 struct vm_area_struct *vma, unsigned long off)
1174 unsigned nnodes = nodes_weight(pol->v.nodes);
1175 unsigned target = (unsigned)off % nnodes;
1176 int c;
1177 int nid = -1;
1179 c = 0;
1180 do {
1181 nid = next_node(nid, pol->v.nodes);
1182 c++;
1183 } while (c <= target);
1184 return nid;
1187 /* Determine a node number for interleave */
1188 static inline unsigned interleave_nid(struct mempolicy *pol,
1189 struct vm_area_struct *vma, unsigned long addr, int shift)
1191 if (vma) {
1192 unsigned long off;
1195 * for small pages, there is no difference between
1196 * shift and PAGE_SHIFT, so the bit-shift is safe.
1197 * for huge pages, since vm_pgoff is in units of small
1198 * pages, we need to shift off the always 0 bits to get
1199 * a useful offset.
1201 BUG_ON(shift < PAGE_SHIFT);
1202 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1203 off += (addr - vma->vm_start) >> shift;
1204 return offset_il_node(pol, vma, off);
1205 } else
1206 return interleave_nodes(pol);
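/*
 * Worked example (illustrative): an interleave policy over nodes {0,2,5}
 * (nnodes = 3), a VMA with vm_pgoff = 0 and a fault on the 8th page of
 * the VMA: off = 7, target = 7 % 3 = 1, so offset_il_node() returns the
 * second set node, node 2.  The same offset always maps to the same
 * node, which is what makes this the "static" interleave.
 */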
1209 #ifdef CONFIG_HUGETLBFS
1210 /* Return a zonelist suitable for a huge page allocation. */
1211 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1212 gfp_t gfp_flags)
1214 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1216 if (pol->policy == MPOL_INTERLEAVE) {
1217 unsigned nid;
1219 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
1220 return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
1222 return zonelist_policy(GFP_HIGHUSER, pol);
1224 #endif
1226 /* Allocate a page in interleaved policy.
1227 Own path because it needs to do special accounting. */
1228 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1229 unsigned nid)
1231 struct zonelist *zl;
1232 struct page *page;
1234 zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
1235 page = __alloc_pages(gfp, order, zl);
1236 if (page && page_zone(page) == zl->zones[0])
1237 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1238 return page;
1242 * alloc_page_vma - Allocate a page for a VMA.
1244 * @gfp:
1245 * %GFP_USER user allocation.
1246 * %GFP_KERNEL kernel allocations,
1247 * %GFP_HIGHMEM highmem/user allocations,
1248 * %GFP_FS allocation should not call back into a file system.
1249 * %GFP_ATOMIC don't sleep.
1251 * @vma: Pointer to VMA or NULL if not available.
1252 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1254 * This function allocates a page from the kernel page pool and applies
1255 * a NUMA policy associated with the VMA or the current process.
1256 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1257 * mm_struct of the VMA to prevent it from going away. Should be used for
1258 * all allocations for pages that will be mapped into
1259 * user space. Returns NULL when no page can be allocated.
1261  *	Should be called with the mmap_sem of the vma held.
1263 struct page *
1264 alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1266 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1268 cpuset_update_task_memory_state();
1270 if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
1271 unsigned nid;
1273 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
1274 return alloc_page_interleave(gfp, 0, nid);
1276 return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol));
1280 * alloc_pages_current - Allocate pages.
1282 * @gfp:
1283 * %GFP_USER user allocation,
1284 * %GFP_KERNEL kernel allocation,
1285 * %GFP_HIGHMEM highmem allocation,
1286 * %GFP_FS don't call back into a file system.
1287 * %GFP_ATOMIC don't sleep.
1288 * @order: Power of two of allocation size in pages. 0 is a single page.
1290  *	Allocate a page from the kernel page pool.  When not in
1291  *	interrupt context, apply the current process' NUMA policy.
1292 * Returns NULL when no page can be allocated.
1294 * Don't call cpuset_update_task_memory_state() unless
1295 * 1) it's ok to take cpuset_sem (can WAIT), and
1296 * 2) allocating for current task (not interrupt).
1298 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1300 struct mempolicy *pol = current->mempolicy;
1302 if ((gfp & __GFP_WAIT) && !in_interrupt())
1303 cpuset_update_task_memory_state();
1304 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1305 pol = &default_policy;
1306 if (pol->policy == MPOL_INTERLEAVE)
1307 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
1308 return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
1310 EXPORT_SYMBOL(alloc_pages_current);
1313 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
1314  * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
1315 * with the mems_allowed returned by cpuset_mems_allowed(). This
1316 * keeps mempolicies cpuset relative after its cpuset moves. See
1317 * further kernel/cpuset.c update_nodemask().
1319 void *cpuset_being_rebound;
1321 /* Slow path of a mempolicy copy */
1322 struct mempolicy *__mpol_copy(struct mempolicy *old)
1324 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1326 if (!new)
1327 return ERR_PTR(-ENOMEM);
1328 if (current_cpuset_is_being_rebound()) {
1329 nodemask_t mems = cpuset_mems_allowed(current);
1330 mpol_rebind_policy(old, &mems);
1332 *new = *old;
1333 atomic_set(&new->refcnt, 1);
1334 if (new->policy == MPOL_BIND) {
1335 int sz = ksize(old->v.zonelist);
1336 new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL);
1337 if (!new->v.zonelist) {
1338 kmem_cache_free(policy_cache, new);
1339 return ERR_PTR(-ENOMEM);
1342 return new;
1345 /* Slow path of a mempolicy comparison */
1346 int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1348 if (!a || !b)
1349 return 0;
1350 if (a->policy != b->policy)
1351 return 0;
1352 switch (a->policy) {
1353 case MPOL_DEFAULT:
1354 return 1;
1355 case MPOL_INTERLEAVE:
1356 return nodes_equal(a->v.nodes, b->v.nodes);
1357 case MPOL_PREFERRED:
1358 return a->v.preferred_node == b->v.preferred_node;
1359 case MPOL_BIND: {
1360 int i;
1361 for (i = 0; a->v.zonelist->zones[i]; i++)
1362 if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
1363 return 0;
1364 return b->v.zonelist->zones[i] == NULL;
1366 default:
1367 BUG();
1368 return 0;
1372 /* Slow path of a mpol destructor. */
1373 void __mpol_free(struct mempolicy *p)
1375 if (!atomic_dec_and_test(&p->refcnt))
1376 return;
1377 if (p->policy == MPOL_BIND)
1378 kfree(p->v.zonelist);
1379 p->policy = MPOL_DEFAULT;
1380 kmem_cache_free(policy_cache, p);
1384 * Shared memory backing store policy support.
1386 * Remember policies even when nobody has shared memory mapped.
1387 * The policies are kept in Red-Black tree linked from the inode.
1388 * They are protected by the sp->lock spinlock, which should be held
1389 * for any accesses to the tree.
1392 /* lookup first element intersecting start-end */
1393 /* Caller holds sp->lock */
1394 static struct sp_node *
1395 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1397 struct rb_node *n = sp->root.rb_node;
1399 while (n) {
1400 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1402 if (start >= p->end)
1403 n = n->rb_right;
1404 else if (end <= p->start)
1405 n = n->rb_left;
1406 else
1407 break;
1409 if (!n)
1410 return NULL;
1411 for (;;) {
1412 struct sp_node *w = NULL;
1413 struct rb_node *prev = rb_prev(n);
1414 if (!prev)
1415 break;
1416 w = rb_entry(prev, struct sp_node, nd);
1417 if (w->end <= start)
1418 break;
1419 n = prev;
1421 return rb_entry(n, struct sp_node, nd);
1424 /* Insert a new shared policy into the list. */
1425 /* Caller holds sp->lock */
1426 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1428 struct rb_node **p = &sp->root.rb_node;
1429 struct rb_node *parent = NULL;
1430 struct sp_node *nd;
1432 while (*p) {
1433 parent = *p;
1434 nd = rb_entry(parent, struct sp_node, nd);
1435 if (new->start < nd->start)
1436 p = &(*p)->rb_left;
1437 else if (new->end > nd->end)
1438 p = &(*p)->rb_right;
1439 else
1440 BUG();
1442 rb_link_node(&new->nd, parent, p);
1443 rb_insert_color(&new->nd, &sp->root);
1444 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
1445 new->policy ? new->policy->policy : 0);
1448 /* Find shared policy intersecting idx */
1449 struct mempolicy *
1450 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1452 struct mempolicy *pol = NULL;
1453 struct sp_node *sn;
1455 if (!sp->root.rb_node)
1456 return NULL;
1457 spin_lock(&sp->lock);
1458 sn = sp_lookup(sp, idx, idx+1);
1459 if (sn) {
1460 mpol_get(sn->policy);
1461 pol = sn->policy;
1463 spin_unlock(&sp->lock);
1464 return pol;
1467 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1469 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
1470 rb_erase(&n->nd, &sp->root);
1471 mpol_free(n->policy);
1472 kmem_cache_free(sn_cache, n);
1475 struct sp_node *
1476 sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol)
1478 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1480 if (!n)
1481 return NULL;
1482 n->start = start;
1483 n->end = end;
1484 mpol_get(pol);
1485 n->policy = pol;
1486 return n;
1489 /* Replace a policy range. */
1490 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1491 unsigned long end, struct sp_node *new)
1493 struct sp_node *n, *new2 = NULL;
1495 restart:
1496 spin_lock(&sp->lock);
1497 n = sp_lookup(sp, start, end);
1498 /* Take care of old policies in the same range. */
1499 while (n && n->start < end) {
1500 struct rb_node *next = rb_next(&n->nd);
1501 if (n->start >= start) {
1502 if (n->end <= end)
1503 sp_delete(sp, n);
1504 else
1505 n->start = end;
1506 } else {
1507 /* Old policy spanning whole new range. */
1508 if (n->end > end) {
1509 if (!new2) {
1510 spin_unlock(&sp->lock);
1511 new2 = sp_alloc(end, n->end, n->policy);
1512 if (!new2)
1513 return -ENOMEM;
1514 goto restart;
1516 n->end = start;
1517 sp_insert(sp, new2);
1518 new2 = NULL;
1519 break;
1520 } else
1521 n->end = start;
1523 if (!next)
1524 break;
1525 n = rb_entry(next, struct sp_node, nd);
1527 if (new)
1528 sp_insert(sp, new);
1529 spin_unlock(&sp->lock);
1530 if (new2) {
1531 mpol_free(new2->policy);
1532 kmem_cache_free(sn_cache, new2);
1534 return 0;
1537 void mpol_shared_policy_init(struct shared_policy *info, int policy,
1538 nodemask_t *policy_nodes)
1540 info->root = RB_ROOT;
1541 spin_lock_init(&info->lock);
1543 if (policy != MPOL_DEFAULT) {
1544 struct mempolicy *newpol;
1546 /* Falls back to MPOL_DEFAULT on any error */
1547 newpol = mpol_new(policy, policy_nodes);
1548 if (!IS_ERR(newpol)) {
1549 /* Create pseudo-vma that contains just the policy */
1550 struct vm_area_struct pvma;
1552 memset(&pvma, 0, sizeof(struct vm_area_struct));
1553 /* Policy covers entire file */
1554 pvma.vm_end = TASK_SIZE;
1555 mpol_set_shared_policy(info, &pvma, newpol);
1556 mpol_free(newpol);
1561 int mpol_set_shared_policy(struct shared_policy *info,
1562 struct vm_area_struct *vma, struct mempolicy *npol)
1564 int err;
1565 struct sp_node *new = NULL;
1566 unsigned long sz = vma_pages(vma);
1568 pr_debug("set_shared_policy %lx sz %lu %d %lx\n",
1569 vma->vm_pgoff,
1570 sz, npol? npol->policy : -1,
1571 npol ? nodes_addr(npol->v.nodes)[0] : -1);
1573 if (npol) {
1574 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1575 if (!new)
1576 return -ENOMEM;
1578 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1579 if (err && new)
1580 kmem_cache_free(sn_cache, new);
1581 return err;
1584 /* Free a backing policy store on inode delete. */
1585 void mpol_free_shared_policy(struct shared_policy *p)
1587 struct sp_node *n;
1588 struct rb_node *next;
1590 if (!p->root.rb_node)
1591 return;
1592 spin_lock(&p->lock);
1593 next = rb_first(&p->root);
1594 while (next) {
1595 n = rb_entry(next, struct sp_node, nd);
1596 next = rb_next(&n->nd);
1597 rb_erase(&n->nd, &p->root);
1598 mpol_free(n->policy);
1599 kmem_cache_free(sn_cache, n);
1601 spin_unlock(&p->lock);
1604 /* assumes fs == KERNEL_DS */
1605 void __init numa_policy_init(void)
1607 nodemask_t interleave_nodes;
1608 unsigned long largest = 0;
1609 int nid, prefer = 0;
1611 policy_cache = kmem_cache_create("numa_policy",
1612 sizeof(struct mempolicy),
1613 0, SLAB_PANIC, NULL);
1615 sn_cache = kmem_cache_create("shared_policy_node",
1616 sizeof(struct sp_node),
1617 0, SLAB_PANIC, NULL);
1620 * Set interleaving policy for system init. Interleaving is only
1621 * enabled across suitably sized nodes (default is >= 16MB), or
1622 	 * falling back to the largest node if they're all smaller.
1624 nodes_clear(interleave_nodes);
1625 for_each_online_node(nid) {
1626 unsigned long total_pages = node_present_pages(nid);
1628 /* Preserve the largest node */
1629 if (largest < total_pages) {
1630 largest = total_pages;
1631 prefer = nid;
1634 /* Interleave this node? */
1635 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1636 node_set(nid, interleave_nodes);
1639 /* All too small, use the largest */
1640 if (unlikely(nodes_empty(interleave_nodes)))
1641 node_set(prefer, interleave_nodes);
1643 if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
1644 		printk(KERN_ERR "numa_policy_init: interleaving failed\n");
1647 /* Reset policy of current process to default */
1648 void numa_default_policy(void)
1650 do_set_mempolicy(MPOL_DEFAULT, NULL);
1653 /* Migrate a policy to a different set of nodes */
1654 void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
1656 nodemask_t *mpolmask;
1657 nodemask_t tmp;
1659 if (!pol)
1660 return;
1661 mpolmask = &pol->cpuset_mems_allowed;
1662 if (nodes_equal(*mpolmask, *newmask))
1663 return;
1665 switch (pol->policy) {
1666 case MPOL_DEFAULT:
1667 break;
1668 case MPOL_INTERLEAVE:
1669 nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
1670 pol->v.nodes = tmp;
1671 *mpolmask = *newmask;
1672 current->il_next = node_remap(current->il_next,
1673 *mpolmask, *newmask);
1674 break;
1675 case MPOL_PREFERRED:
1676 pol->v.preferred_node = node_remap(pol->v.preferred_node,
1677 *mpolmask, *newmask);
1678 *mpolmask = *newmask;
1679 break;
1680 case MPOL_BIND: {
1681 nodemask_t nodes;
1682 struct zone **z;
1683 struct zonelist *zonelist;
1685 nodes_clear(nodes);
1686 for (z = pol->v.zonelist->zones; *z; z++)
1687 node_set(zone_to_nid(*z), nodes);
1688 nodes_remap(tmp, nodes, *mpolmask, *newmask);
1689 nodes = tmp;
1691 zonelist = bind_zonelist(&nodes);
1693 		/* If there is no memory in the new nodes, bind_zonelist() fails and we keep the old zonelist.
1694 * If that old zonelist has no remaining mems_allowed nodes,
1695 * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
1698 if (!IS_ERR(zonelist)) {
1699 /* Good - got mem - substitute new zonelist */
1700 kfree(pol->v.zonelist);
1701 pol->v.zonelist = zonelist;
1703 *mpolmask = *newmask;
1704 break;
1706 default:
1707 BUG();
1708 break;
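/*
 * Worked example (illustrative): a task whose cpuset mems_allowed moves
 * from {0,1} to {2,3}.  An MPOL_INTERLEAVE policy over {0,1} is remapped
 * to {2,3} (and il_next follows the same remap, e.g. 1 -> 3), while an
 * MPOL_PREFERRED policy on node 1 becomes node 3.
 */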
1713 * Wrapper for mpol_rebind_policy() that just requires task
1714 * pointer, and updates task mempolicy.
1717 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
1719 mpol_rebind_policy(tsk->mempolicy, new);
1723 * Rebind each vma in mm to new nodemask.
1725 * Call holding a reference to mm. Takes mm->mmap_sem during call.
1728 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
1730 struct vm_area_struct *vma;
1732 down_write(&mm->mmap_sem);
1733 for (vma = mm->mmap; vma; vma = vma->vm_next)
1734 mpol_rebind_policy(vma->vm_policy, new);
1735 up_write(&mm->mmap_sem);
1739 * Display pages allocated per node and memory policy via /proc.
1742 static const char * const policy_types[] =
1743 { "default", "prefer", "bind", "interleave" };
1746 * Convert a mempolicy into a string.
1747 * Returns the number of characters in buffer (if positive)
1748 * or an error (negative)
1750 static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1752 char *p = buffer;
1753 int l;
1754 nodemask_t nodes;
1755 int mode = pol ? pol->policy : MPOL_DEFAULT;
1757 switch (mode) {
1758 case MPOL_DEFAULT:
1759 nodes_clear(nodes);
1760 break;
1762 case MPOL_PREFERRED:
1763 nodes_clear(nodes);
1764 node_set(pol->v.preferred_node, nodes);
1765 break;
1767 case MPOL_BIND:
1768 get_zonemask(pol, &nodes);
1769 break;
1771 case MPOL_INTERLEAVE:
1772 nodes = pol->v.nodes;
1773 break;
1775 default:
1776 BUG();
1777 return -EFAULT;
1780 l = strlen(policy_types[mode]);
1781 if (buffer + maxlen < p + l + 1)
1782 return -ENOSPC;
1784 strcpy(p, policy_types[mode]);
1785 p += l;
1787 if (!nodes_empty(nodes)) {
1788 if (buffer + maxlen < p + 2)
1789 return -ENOSPC;
1790 *p++ = '=';
1791 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
1793 return p - buffer;
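/*
 * Illustrative output of mpol_to_str(): "default", "prefer=2", "bind=0-1"
 * or "interleave=0,2,5", i.e. the policy name, optionally followed by '='
 * and the nodelist.
 */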
1796 struct numa_maps {
1797 unsigned long pages;
1798 unsigned long anon;
1799 unsigned long active;
1800 unsigned long writeback;
1801 unsigned long mapcount_max;
1802 unsigned long dirty;
1803 unsigned long swapcache;
1804 unsigned long node[MAX_NUMNODES];
1807 static void gather_stats(struct page *page, void *private, int pte_dirty)
1809 struct numa_maps *md = private;
1810 int count = page_mapcount(page);
1812 md->pages++;
1813 if (pte_dirty || PageDirty(page))
1814 md->dirty++;
1816 if (PageSwapCache(page))
1817 md->swapcache++;
1819 if (PageActive(page))
1820 md->active++;
1822 if (PageWriteback(page))
1823 md->writeback++;
1825 if (PageAnon(page))
1826 md->anon++;
1828 if (count > md->mapcount_max)
1829 md->mapcount_max = count;
1831 md->node[page_to_nid(page)]++;
1834 #ifdef CONFIG_HUGETLB_PAGE
1835 static void check_huge_range(struct vm_area_struct *vma,
1836 unsigned long start, unsigned long end,
1837 struct numa_maps *md)
1839 unsigned long addr;
1840 struct page *page;
1842 for (addr = start; addr < end; addr += HPAGE_SIZE) {
1843 pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
1844 pte_t pte;
1846 if (!ptep)
1847 continue;
1849 pte = *ptep;
1850 if (pte_none(pte))
1851 continue;
1853 page = pte_page(pte);
1854 if (!page)
1855 continue;
1857 gather_stats(page, md, pte_dirty(*ptep));
1860 #else
1861 static inline void check_huge_range(struct vm_area_struct *vma,
1862 unsigned long start, unsigned long end,
1863 struct numa_maps *md)
1866 #endif
1868 int show_numa_map(struct seq_file *m, void *v)
1870 struct proc_maps_private *priv = m->private;
1871 struct vm_area_struct *vma = v;
1872 struct numa_maps *md;
1873 struct file *file = vma->vm_file;
1874 struct mm_struct *mm = vma->vm_mm;
1875 int n;
1876 char buffer[50];
1878 if (!mm)
1879 return 0;
1881 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
1882 if (!md)
1883 return 0;
1885 mpol_to_str(buffer, sizeof(buffer),
1886 get_vma_policy(priv->task, vma, vma->vm_start));
1888 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1890 if (file) {
1891 seq_printf(m, " file=");
1892 seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n\t= ");
1893 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1894 seq_printf(m, " heap");
1895 } else if (vma->vm_start <= mm->start_stack &&
1896 vma->vm_end >= mm->start_stack) {
1897 seq_printf(m, " stack");
1900 if (is_vm_hugetlb_page(vma)) {
1901 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
1902 seq_printf(m, " huge");
1903 } else {
1904 check_pgd_range(vma, vma->vm_start, vma->vm_end,
1905 &node_online_map, MPOL_MF_STATS, md);
1908 if (!md->pages)
1909 goto out;
1911 if (md->anon)
1912 seq_printf(m," anon=%lu",md->anon);
1914 if (md->dirty)
1915 seq_printf(m," dirty=%lu",md->dirty);
1917 if (md->pages != md->anon && md->pages != md->dirty)
1918 seq_printf(m, " mapped=%lu", md->pages);
1920 if (md->mapcount_max > 1)
1921 seq_printf(m, " mapmax=%lu", md->mapcount_max);
1923 if (md->swapcache)
1924 seq_printf(m," swapcache=%lu", md->swapcache);
1926 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1927 seq_printf(m," active=%lu", md->active);
1929 if (md->writeback)
1930 seq_printf(m," writeback=%lu", md->writeback);
1932 for_each_online_node(n)
1933 if (md->node[n])
1934 seq_printf(m, " N%d=%lu", n, md->node[n]);
1935 out:
1936 seq_putc(m, '\n');
1937 kfree(md);
1939 if (m->count < m->size)
1940 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
1941 return 0;
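/*
 * Illustrative /proc/<pid>/numa_maps line produced by the code above
 * (values invented for the example):
 *
 *	2aaaaac00000 interleave=0-3 anon=16 dirty=16 active=12 N0=4 N1=4 N2=4 N3=4
 */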