[PATCH] update some mm/ comments
linux-2.6.git: mm/mempolicy.c
1 /*
2 * Simple NUMA memory policy for the Linux kernel.
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
8 * NUMA policy allows the user to give hints in which node(s) memory should
9 * be allocated.
11 * Support four policies per VMA and per process:
13 * The VMA policy has priority over the process policy for a page fault.
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
20 * is used.
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
28 * preferred Try a specific node first before normal fallback.
29 * As a special case node -1 here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
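 *
 * An illustrative user-space sketch of how the policies above are
 * requested, using the mbind(2)/set_mempolicy(2) wrappers and MPOL_*
 * constants from <numaif.h> (libnuma).  The node numbers, sizes and the
 * lack of error handling are assumptions made only for this example:
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	// interleave a 16MB anonymous mapping across nodes 0 and 1
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *	void *p = mmap(NULL, 16UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(p, 16UL << 20, MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes), 0);
 *
 *	// prefer node 0 for this task's own future allocations
 *	unsigned long pref = 1UL << 0;
 *	set_mempolicy(MPOL_PREFERRED, &pref, 8 * sizeof(pref));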
56 /* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always graceful about that.
66 could replace all the switch()es with a mempolicy_ops structure.
69 #include <linux/mempolicy.h>
70 #include <linux/mm.h>
71 #include <linux/highmem.h>
72 #include <linux/hugetlb.h>
73 #include <linux/kernel.h>
74 #include <linux/sched.h>
76 #include <linux/nodemask.h>
77 #include <linux/cpuset.h>
78 #include <linux/gfp.h>
79 #include <linux/slab.h>
80 #include <linux/string.h>
81 #include <linux/module.h>
82 #include <linux/interrupt.h>
83 #include <linux/init.h>
84 #include <linux/compat.h>
86 #include <linux/swap.h>
87 #include <linux/seq_file.h>
88 #include <linux/proc_fs.h>
89 #include <linux/migrate.h>
90 #include <linux/rmap.h>
91 #include <linux/security.h>
93 #include <asm/tlbflush.h>
94 #include <asm/uaccess.h>
96 /* Internal flags */
97 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
98 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
99 #define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */
101 static struct kmem_cache *policy_cache;
102 static struct kmem_cache *sn_cache;
104 #define PDprintk(fmt...)
106 /* Highest zone. A specific allocation for a zone below that is not
107 policied. */
108 enum zone_type policy_zone = ZONE_DMA;
110 struct mempolicy default_policy = {
111 .refcnt = ATOMIC_INIT(1), /* never free it */
112 .policy = MPOL_DEFAULT,
115 /* Do sanity checking on a policy */
116 static int mpol_check_policy(int mode, nodemask_t *nodes)
118 int empty = nodes_empty(*nodes);
120 switch (mode) {
121 case MPOL_DEFAULT:
122 if (!empty)
123 return -EINVAL;
124 break;
125 case MPOL_BIND:
126 case MPOL_INTERLEAVE:
127 /* Preferred will only use the first bit, but allow
128 more for now. */
129 if (empty)
130 return -EINVAL;
131 break;
133 return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
136 /* Generate a custom zonelist for the BIND policy. */
137 static struct zonelist *bind_zonelist(nodemask_t *nodes)
139 struct zonelist *zl;
140 int num, max, nd;
141 enum zone_type k;
143 max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
144 zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
145 if (!zl)
146 return NULL;
147 num = 0;
148 /* First put in the highest zones from all nodes, then all the next
149 lower zones etc. Avoid empty zones because the memory allocator
150 doesn't like them. If you implement node hot removal you
151 have to fix that. */
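/*
 * Worked example (assuming policy_zone == ZONE_NORMAL, a bind mask of
 * nodes 0 and 2, and only ZONE_NORMAL and ZONE_DMA populated on both):
 * the zonelist built below is
 *	node0/NORMAL, node2/NORMAL, node0/DMA, node2/DMA
 * i.e. the highest zone of every bound node first, then the next lower
 * zone of every bound node, and so on.
 */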
152 k = policy_zone;
153 while (1) {
154 for_each_node_mask(nd, *nodes) {
155 struct zone *z = &NODE_DATA(nd)->node_zones[k];
156 if (z->present_pages > 0)
157 zl->zones[num++] = z;
159 if (k == 0)
160 break;
161 k--;
163 zl->zones[num] = NULL;
164 return zl;
167 /* Create a new policy */
168 static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
170 struct mempolicy *policy;
172 PDprintk("setting mode %d nodes[0] %lx\n", mode, nodes_addr(*nodes)[0]);
173 if (mode == MPOL_DEFAULT)
174 return NULL;
175 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
176 if (!policy)
177 return ERR_PTR(-ENOMEM);
178 atomic_set(&policy->refcnt, 1);
179 switch (mode) {
180 case MPOL_INTERLEAVE:
181 policy->v.nodes = *nodes;
182 if (nodes_weight(*nodes) == 0) {
183 kmem_cache_free(policy_cache, policy);
184 return ERR_PTR(-EINVAL);
186 break;
187 case MPOL_PREFERRED:
188 policy->v.preferred_node = first_node(*nodes);
189 if (policy->v.preferred_node >= MAX_NUMNODES)
190 policy->v.preferred_node = -1;
191 break;
192 case MPOL_BIND:
193 policy->v.zonelist = bind_zonelist(nodes);
194 if (policy->v.zonelist == NULL) {
195 kmem_cache_free(policy_cache, policy);
196 return ERR_PTR(-ENOMEM);
198 break;
200 policy->policy = mode;
201 policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
202 return policy;
205 static void gather_stats(struct page *, void *, int pte_dirty);
206 static void migrate_page_add(struct page *page, struct list_head *pagelist,
207 unsigned long flags);
209 /* Scan through pages checking if pages follow certain conditions. */
210 static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
211 unsigned long addr, unsigned long end,
212 const nodemask_t *nodes, unsigned long flags,
213 void *private)
215 pte_t *orig_pte;
216 pte_t *pte;
217 spinlock_t *ptl;
219 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
220 do {
221 struct page *page;
222 unsigned int nid;
224 if (!pte_present(*pte))
225 continue;
226 page = vm_normal_page(vma, addr, *pte);
227 if (!page)
228 continue;
230 * The check for PageReserved here is important to avoid
231 * handling zero pages and other pages that may have been
232 * marked special by the system.
234 * If PageReserved were not checked here then e.g.
235 * the location of the zero page could have an influence
236 * on MPOL_MF_STRICT, zero pages would be counted for
237 * the per node stats, and there would be useless attempts
238 * to put zero pages on the migration list.
240 if (PageReserved(page))
241 continue;
242 nid = page_to_nid(page);
243 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
244 continue;
246 if (flags & MPOL_MF_STATS)
247 gather_stats(page, private, pte_dirty(*pte));
248 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
249 migrate_page_add(page, private, flags);
250 else
251 break;
252 } while (pte++, addr += PAGE_SIZE, addr != end);
253 pte_unmap_unlock(orig_pte, ptl);
254 return addr != end;
257 static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
258 unsigned long addr, unsigned long end,
259 const nodemask_t *nodes, unsigned long flags,
260 void *private)
262 pmd_t *pmd;
263 unsigned long next;
265 pmd = pmd_offset(pud, addr);
266 do {
267 next = pmd_addr_end(addr, end);
268 if (pmd_none_or_clear_bad(pmd))
269 continue;
270 if (check_pte_range(vma, pmd, addr, next, nodes,
271 flags, private))
272 return -EIO;
273 } while (pmd++, addr = next, addr != end);
274 return 0;
277 static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
278 unsigned long addr, unsigned long end,
279 const nodemask_t *nodes, unsigned long flags,
280 void *private)
282 pud_t *pud;
283 unsigned long next;
285 pud = pud_offset(pgd, addr);
286 do {
287 next = pud_addr_end(addr, end);
288 if (pud_none_or_clear_bad(pud))
289 continue;
290 if (check_pmd_range(vma, pud, addr, next, nodes,
291 flags, private))
292 return -EIO;
293 } while (pud++, addr = next, addr != end);
294 return 0;
297 static inline int check_pgd_range(struct vm_area_struct *vma,
298 unsigned long addr, unsigned long end,
299 const nodemask_t *nodes, unsigned long flags,
300 void *private)
302 pgd_t *pgd;
303 unsigned long next;
305 pgd = pgd_offset(vma->vm_mm, addr);
306 do {
307 next = pgd_addr_end(addr, end);
308 if (pgd_none_or_clear_bad(pgd))
309 continue;
310 if (check_pud_range(vma, pgd, addr, next, nodes,
311 flags, private))
312 return -EIO;
313 } while (pgd++, addr = next, addr != end);
314 return 0;
317 /* Check if a vma is migratable */
318 static inline int vma_migratable(struct vm_area_struct *vma)
320 if (vma->vm_flags & (
321 VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
322 return 0;
323 return 1;
327 * Check if all pages in a range are on a set of nodes.
328 * If pagelist != NULL then isolate pages from the LRU and
329 * put them on the pagelist.
331 static struct vm_area_struct *
332 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
333 const nodemask_t *nodes, unsigned long flags, void *private)
335 int err;
336 struct vm_area_struct *first, *vma, *prev;
338 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
340 err = migrate_prep();
341 if (err)
342 return ERR_PTR(err);
345 first = find_vma(mm, start);
346 if (!first)
347 return ERR_PTR(-EFAULT);
348 prev = NULL;
349 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
350 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
351 if (!vma->vm_next && vma->vm_end < end)
352 return ERR_PTR(-EFAULT);
353 if (prev && prev->vm_end < vma->vm_start)
354 return ERR_PTR(-EFAULT);
356 if (!is_vm_hugetlb_page(vma) &&
357 ((flags & MPOL_MF_STRICT) ||
358 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
359 vma_migratable(vma)))) {
360 unsigned long endvma = vma->vm_end;
362 if (endvma > end)
363 endvma = end;
364 if (vma->vm_start > start)
365 start = vma->vm_start;
366 err = check_pgd_range(vma, start, endvma, nodes,
367 flags, private);
368 if (err) {
369 first = ERR_PTR(err);
370 break;
373 prev = vma;
375 return first;
378 /* Apply policy to a single VMA */
379 static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
381 int err = 0;
382 struct mempolicy *old = vma->vm_policy;
384 PDprintk("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
385 vma->vm_start, vma->vm_end, vma->vm_pgoff,
386 vma->vm_ops, vma->vm_file,
387 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
389 if (vma->vm_ops && vma->vm_ops->set_policy)
390 err = vma->vm_ops->set_policy(vma, new);
391 if (!err) {
392 mpol_get(new);
393 vma->vm_policy = new;
394 mpol_free(old);
396 return err;
399 /* Step 2: apply policy to a range and do splits. */
400 static int mbind_range(struct vm_area_struct *vma, unsigned long start,
401 unsigned long end, struct mempolicy *new)
403 struct vm_area_struct *next;
404 int err;
406 err = 0;
407 for (; vma && vma->vm_start < end; vma = next) {
408 next = vma->vm_next;
409 if (vma->vm_start < start)
410 err = split_vma(vma->vm_mm, vma, start, 1);
411 if (!err && vma->vm_end > end)
412 err = split_vma(vma->vm_mm, vma, end, 0);
413 if (!err)
414 err = policy_vma(vma, new);
415 if (err)
416 break;
418 return err;
421 static int contextualize_policy(int mode, nodemask_t *nodes)
423 if (!nodes)
424 return 0;
426 cpuset_update_task_memory_state();
427 if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
428 return -EINVAL;
429 return mpol_check_policy(mode, nodes);
434 * Update task->flags PF_MEMPOLICY bit: set iff non-default
435 * mempolicy. Allows more rapid checking of this (combined perhaps
436 * with other PF_* flag bits) on memory allocation hot code paths.
438 * If called from outside this file, the task 'p' should -only- be
439 * a newly forked child not yet visible on the task list, because
440 * manipulating the task flags of a visible task is not safe.
442 * The above limitation is why this routine has the funny name
443 * mpol_fix_fork_child_flag().
445 * It is also safe to call this with a task pointer of current,
446 * which the static wrapper mpol_set_task_struct_flag() does,
447 * for use within this file.
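 *
 * A sketch of the intended fork-time use (assuming the usual call site
 * in copy_process(), after the child's mempolicy has been copied):
 *
 *	tsk->mempolicy = mpol_copy(tsk->mempolicy);
 *	...
 *	mpol_fix_fork_child_flag(tsk);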
450 void mpol_fix_fork_child_flag(struct task_struct *p)
452 if (p->mempolicy)
453 p->flags |= PF_MEMPOLICY;
454 else
455 p->flags &= ~PF_MEMPOLICY;
458 static void mpol_set_task_struct_flag(void)
460 mpol_fix_fork_child_flag(current);
463 /* Set the process memory policy */
464 long do_set_mempolicy(int mode, nodemask_t *nodes)
466 struct mempolicy *new;
468 if (contextualize_policy(mode, nodes))
469 return -EINVAL;
470 new = mpol_new(mode, nodes);
471 if (IS_ERR(new))
472 return PTR_ERR(new);
473 mpol_free(current->mempolicy);
474 current->mempolicy = new;
475 mpol_set_task_struct_flag();
476 if (new && new->policy == MPOL_INTERLEAVE)
477 current->il_next = first_node(new->v.nodes);
478 return 0;
481 /* Fill a node mask for a policy */
482 static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
484 int i;
486 nodes_clear(*nodes);
487 switch (p->policy) {
488 case MPOL_BIND:
489 for (i = 0; p->v.zonelist->zones[i]; i++)
490 node_set(p->v.zonelist->zones[i]->zone_pgdat->node_id,
491 *nodes);
492 break;
493 case MPOL_DEFAULT:
494 break;
495 case MPOL_INTERLEAVE:
496 *nodes = p->v.nodes;
497 break;
498 case MPOL_PREFERRED:
499 /* or use current node instead of online map? */
500 if (p->v.preferred_node < 0)
501 *nodes = node_online_map;
502 else
503 node_set(p->v.preferred_node, *nodes);
504 break;
505 default:
506 BUG();
510 static int lookup_node(struct mm_struct *mm, unsigned long addr)
512 struct page *p;
513 int err;
515 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
516 if (err >= 0) {
517 err = page_to_nid(p);
518 put_page(p);
520 return err;
523 /* Retrieve NUMA policy */
524 long do_get_mempolicy(int *policy, nodemask_t *nmask,
525 unsigned long addr, unsigned long flags)
527 int err;
528 struct mm_struct *mm = current->mm;
529 struct vm_area_struct *vma = NULL;
530 struct mempolicy *pol = current->mempolicy;
532 cpuset_update_task_memory_state();
533 if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
534 return -EINVAL;
535 if (flags & MPOL_F_ADDR) {
536 down_read(&mm->mmap_sem);
537 vma = find_vma_intersection(mm, addr, addr+1);
538 if (!vma) {
539 up_read(&mm->mmap_sem);
540 return -EFAULT;
542 if (vma->vm_ops && vma->vm_ops->get_policy)
543 pol = vma->vm_ops->get_policy(vma, addr);
544 else
545 pol = vma->vm_policy;
546 } else if (addr)
547 return -EINVAL;
549 if (!pol)
550 pol = &default_policy;
552 if (flags & MPOL_F_NODE) {
553 if (flags & MPOL_F_ADDR) {
554 err = lookup_node(mm, addr);
555 if (err < 0)
556 goto out;
557 *policy = err;
558 } else if (pol == current->mempolicy &&
559 pol->policy == MPOL_INTERLEAVE) {
560 *policy = current->il_next;
561 } else {
562 err = -EINVAL;
563 goto out;
565 } else
566 *policy = pol->policy;
568 if (vma) {
569 up_read(&current->mm->mmap_sem);
570 vma = NULL;
573 err = 0;
574 if (nmask)
575 get_zonemask(pol, nmask);
577 out:
578 if (vma)
579 up_read(&current->mm->mmap_sem);
580 return err;
583 #ifdef CONFIG_MIGRATION
585 * page migration
587 static void migrate_page_add(struct page *page, struct list_head *pagelist,
588 unsigned long flags)
591 * Avoid migrating a page that is shared with others.
593 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
594 isolate_lru_page(page, pagelist);
597 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
599 return alloc_pages_node(node, GFP_HIGHUSER, 0);
603 * Migrate pages from one node to a target node.
604 * Returns error or the number of pages not migrated.
606 int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
608 nodemask_t nmask;
609 LIST_HEAD(pagelist);
610 int err = 0;
612 nodes_clear(nmask);
613 node_set(source, nmask);
615 check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
616 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
618 if (!list_empty(&pagelist))
619 err = migrate_pages(&pagelist, new_node_page, dest);
621 return err;
625 * Move pages between the two nodesets so as to preserve the physical
626 * layout as much as possible.
628 * Returns the number of pages that could not be moved.
630 int do_migrate_pages(struct mm_struct *mm,
631 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
633 LIST_HEAD(pagelist);
634 int busy = 0;
635 int err = 0;
636 nodemask_t tmp;
638 down_read(&mm->mmap_sem);
640 err = migrate_vmas(mm, from_nodes, to_nodes, flags);
641 if (err)
642 goto out;
645 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
646 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
647 * bit in 'tmp', and return that <source, dest> pair for migration.
648 * The pair of nodemasks 'to' and 'from' define the map.
650 * If no pair of bits is found that way, fallback to picking some
651 * pair of 'source' and 'dest' bits that are not the same. If the
652 * 'source' and 'dest' bits are the same, this represents a node
653 * that will be migrating to itself, so no pages need move.
655 * If no bits are left in 'tmp', or if all remaining bits left
656 * in 'tmp' correspond to the same bit in 'to', return false
657 * (nothing left to migrate).
659 * This lets us pick a pair of nodes to migrate between, such that
660 * if possible the dest node is not already occupied by some other
661 * source node, minimizing the risk of overloading the memory on a
662 * node that would happen if we migrated incoming memory to a node
663 * before migrating outgoing memory from that same node.
665 * A single scan of tmp is sufficient. As we go, we remember the
666 * most recent <s, d> pair that moved (s != d). If we find a pair
667 * that not only moved, but what's better, moved to an empty slot
668 * (d is not set in tmp), then we break out then, with that pair.
669 * Otherwise when we finish scanning from_tmp, we at least have the
670 * most recent <s, d> pair that moved. If we get all the way through
671 * the scan of tmp without finding any node that moved, much less
672 * moved to an empty node, then there is nothing left worth migrating.
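 *
 * Worked example (node numbers made up): with from = {0,1} and
 * to = {1,2} the remap is 0->1 and 1->2.  On the first scan s=0 gives
 * d=1, but node 1 is still a pending source, so we keep looking; s=1
 * gives d=2, an empty destination, so 1->2 is migrated first.  Only
 * then is 0->1 done, i.e. node 1 is drained before pages are moved
 * onto it.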
675 tmp = *from_nodes;
676 while (!nodes_empty(tmp)) {
677 int s,d;
678 int source = -1;
679 int dest = 0;
681 for_each_node_mask(s, tmp) {
682 d = node_remap(s, *from_nodes, *to_nodes);
683 if (s == d)
684 continue;
686 source = s; /* Node moved. Memorize */
687 dest = d;
689 /* dest not in remaining from nodes? */
690 if (!node_isset(dest, tmp))
691 break;
693 if (source == -1)
694 break;
696 node_clear(source, tmp);
697 err = migrate_to_node(mm, source, dest, flags);
698 if (err > 0)
699 busy += err;
700 if (err < 0)
701 break;
703 out:
704 up_read(&mm->mmap_sem);
705 if (err < 0)
706 return err;
707 return busy;
711 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
713 struct vm_area_struct *vma = (struct vm_area_struct *)private;
715 return alloc_page_vma(GFP_HIGHUSER, vma, page_address_in_vma(page, vma));
717 #else
719 static void migrate_page_add(struct page *page, struct list_head *pagelist,
720 unsigned long flags)
724 int do_migrate_pages(struct mm_struct *mm,
725 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
727 return -ENOSYS;
730 static struct page *new_vma_page(struct page *page, unsigned long private)
732 return NULL;
734 #endif
736 long do_mbind(unsigned long start, unsigned long len,
737 unsigned long mode, nodemask_t *nmask, unsigned long flags)
739 struct vm_area_struct *vma;
740 struct mm_struct *mm = current->mm;
741 struct mempolicy *new;
742 unsigned long end;
743 int err;
744 LIST_HEAD(pagelist);
746 if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
747 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
748 || mode > MPOL_MAX)
749 return -EINVAL;
750 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
751 return -EPERM;
753 if (start & ~PAGE_MASK)
754 return -EINVAL;
756 if (mode == MPOL_DEFAULT)
757 flags &= ~MPOL_MF_STRICT;
759 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
760 end = start + len;
762 if (end < start)
763 return -EINVAL;
764 if (end == start)
765 return 0;
767 if (mpol_check_policy(mode, nmask))
768 return -EINVAL;
770 new = mpol_new(mode, nmask);
771 if (IS_ERR(new))
772 return PTR_ERR(new);
775 * If we are using the default policy then operation
776 * on discontinuous address spaces is okay after all
778 if (!new)
779 flags |= MPOL_MF_DISCONTIG_OK;
781 PDprintk("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
782 mode,nodes_addr(nodes)[0]);
784 down_write(&mm->mmap_sem);
785 vma = check_range(mm, start, end, nmask,
786 flags | MPOL_MF_INVERT, &pagelist);
788 err = PTR_ERR(vma);
789 if (!IS_ERR(vma)) {
790 int nr_failed = 0;
792 err = mbind_range(vma, start, end, new);
794 if (!list_empty(&pagelist))
795 nr_failed = migrate_pages(&pagelist, new_vma_page,
796 (unsigned long)vma);
798 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
799 err = -EIO;
802 up_write(&mm->mmap_sem);
803 mpol_free(new);
804 return err;
808 * User space interface with variable sized bitmaps for nodelists.
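 *
 * Illustrative encoding (node numbers made up): to name nodes 1 and 3 a
 * caller passes a bitmap whose first long is (1UL << 1) | (1UL << 3) and
 * a maxnode large enough to cover bit 3; passing the full width of the
 * buffer, e.g. 8 * sizeof(unsigned long) for a single long, is the
 * simple safe choice.  get_nodes() below rejects or masks off any set
 * bits the caller did not declare.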
811 /* Copy a node mask from user space. */
812 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
813 unsigned long maxnode)
815 unsigned long k;
816 unsigned long nlongs;
817 unsigned long endmask;
819 --maxnode;
820 nodes_clear(*nodes);
821 if (maxnode == 0 || !nmask)
822 return 0;
823 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
824 return -EINVAL;
826 nlongs = BITS_TO_LONGS(maxnode);
827 if ((maxnode % BITS_PER_LONG) == 0)
828 endmask = ~0UL;
829 else
830 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
832 /* When the user specified more nodes than supported just check
833 if the unsupported part is all zero. */
834 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
835 if (nlongs > PAGE_SIZE/sizeof(long))
836 return -EINVAL;
837 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
838 unsigned long t;
839 if (get_user(t, nmask + k))
840 return -EFAULT;
841 if (k == nlongs - 1) {
842 if (t & endmask)
843 return -EINVAL;
844 } else if (t)
845 return -EINVAL;
847 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
848 endmask = ~0UL;
851 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
852 return -EFAULT;
853 nodes_addr(*nodes)[nlongs-1] &= endmask;
854 return 0;
857 /* Copy a kernel node mask to user space */
858 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
859 nodemask_t *nodes)
861 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
862 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
864 if (copy > nbytes) {
865 if (copy > PAGE_SIZE)
866 return -EINVAL;
867 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
868 return -EFAULT;
869 copy = nbytes;
871 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
874 asmlinkage long sys_mbind(unsigned long start, unsigned long len,
875 unsigned long mode,
876 unsigned long __user *nmask, unsigned long maxnode,
877 unsigned flags)
879 nodemask_t nodes;
880 int err;
882 err = get_nodes(&nodes, nmask, maxnode);
883 if (err)
884 return err;
885 return do_mbind(start, len, mode, &nodes, flags);
888 /* Set the process memory policy */
889 asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
890 unsigned long maxnode)
892 int err;
893 nodemask_t nodes;
895 if (mode < 0 || mode > MPOL_MAX)
896 return -EINVAL;
897 err = get_nodes(&nodes, nmask, maxnode);
898 if (err)
899 return err;
900 return do_set_mempolicy(mode, &nodes);
903 asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
904 const unsigned long __user *old_nodes,
905 const unsigned long __user *new_nodes)
907 struct mm_struct *mm;
908 struct task_struct *task;
909 nodemask_t old;
910 nodemask_t new;
911 nodemask_t task_nodes;
912 int err;
914 err = get_nodes(&old, old_nodes, maxnode);
915 if (err)
916 return err;
918 err = get_nodes(&new, new_nodes, maxnode);
919 if (err)
920 return err;
922 /* Find the mm_struct */
923 read_lock(&tasklist_lock);
924 task = pid ? find_task_by_pid(pid) : current;
925 if (!task) {
926 read_unlock(&tasklist_lock);
927 return -ESRCH;
929 mm = get_task_mm(task);
930 read_unlock(&tasklist_lock);
932 if (!mm)
933 return -EINVAL;
936 * Check if this process has the right to modify the specified
937 * process. The right exists if the process has administrative
938 * capabilities, superuser privileges or the same
939 * userid as the target process.
941 if ((current->euid != task->suid) && (current->euid != task->uid) &&
942 (current->uid != task->suid) && (current->uid != task->uid) &&
943 !capable(CAP_SYS_NICE)) {
944 err = -EPERM;
945 goto out;
948 task_nodes = cpuset_mems_allowed(task);
949 /* Is the user allowed to access the target nodes? */
950 if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
951 err = -EPERM;
952 goto out;
955 err = security_task_movememory(task);
956 if (err)
957 goto out;
959 err = do_migrate_pages(mm, &old, &new,
960 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
961 out:
962 mmput(mm);
963 return err;
967 /* Retrieve NUMA policy */
968 asmlinkage long sys_get_mempolicy(int __user *policy,
969 unsigned long __user *nmask,
970 unsigned long maxnode,
971 unsigned long addr, unsigned long flags)
973 int err, pval;
974 nodemask_t nodes;
976 if (nmask != NULL && maxnode < MAX_NUMNODES)
977 return -EINVAL;
979 err = do_get_mempolicy(&pval, &nodes, addr, flags);
981 if (err)
982 return err;
984 if (policy && put_user(pval, policy))
985 return -EFAULT;
987 if (nmask)
988 err = copy_nodes_to_user(nmask, maxnode, &nodes);
990 return err;
993 #ifdef CONFIG_COMPAT
995 asmlinkage long compat_sys_get_mempolicy(int __user *policy,
996 compat_ulong_t __user *nmask,
997 compat_ulong_t maxnode,
998 compat_ulong_t addr, compat_ulong_t flags)
1000 long err;
1001 unsigned long __user *nm = NULL;
1002 unsigned long nr_bits, alloc_size;
1003 DECLARE_BITMAP(bm, MAX_NUMNODES);
1005 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1006 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1008 if (nmask)
1009 nm = compat_alloc_user_space(alloc_size);
1011 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1013 if (!err && nmask) {
1014 err = copy_from_user(bm, nm, alloc_size);
1015 /* ensure entire bitmap is zeroed */
1016 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1017 err |= compat_put_bitmap(nmask, bm, nr_bits);
1020 return err;
1023 asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1024 compat_ulong_t maxnode)
1026 long err = 0;
1027 unsigned long __user *nm = NULL;
1028 unsigned long nr_bits, alloc_size;
1029 DECLARE_BITMAP(bm, MAX_NUMNODES);
1031 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1032 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1034 if (nmask) {
1035 err = compat_get_bitmap(bm, nmask, nr_bits);
1036 nm = compat_alloc_user_space(alloc_size);
1037 err |= copy_to_user(nm, bm, alloc_size);
1040 if (err)
1041 return -EFAULT;
1043 return sys_set_mempolicy(mode, nm, nr_bits+1);
1046 asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1047 compat_ulong_t mode, compat_ulong_t __user *nmask,
1048 compat_ulong_t maxnode, compat_ulong_t flags)
1050 long err = 0;
1051 unsigned long __user *nm = NULL;
1052 unsigned long nr_bits, alloc_size;
1053 nodemask_t bm;
1055 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1056 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1058 if (nmask) {
1059 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1060 nm = compat_alloc_user_space(alloc_size);
1061 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1064 if (err)
1065 return -EFAULT;
1067 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1070 #endif
1072 /* Return effective policy for a VMA */
1073 static struct mempolicy * get_vma_policy(struct task_struct *task,
1074 struct vm_area_struct *vma, unsigned long addr)
1076 struct mempolicy *pol = task->mempolicy;
1078 if (vma) {
1079 if (vma->vm_ops && vma->vm_ops->get_policy)
1080 pol = vma->vm_ops->get_policy(vma, addr);
1081 else if (vma->vm_policy &&
1082 vma->vm_policy->policy != MPOL_DEFAULT)
1083 pol = vma->vm_policy;
1085 if (!pol)
1086 pol = &default_policy;
1087 return pol;
1090 /* Return a zonelist representing a mempolicy */
1091 static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
1093 int nd;
1095 switch (policy->policy) {
1096 case MPOL_PREFERRED:
1097 nd = policy->v.preferred_node;
1098 if (nd < 0)
1099 nd = numa_node_id();
1100 break;
1101 case MPOL_BIND:
1102 /* Lower zones don't get a policy applied */
1103 /* Careful: current->mems_allowed might have moved */
1104 if (gfp_zone(gfp) >= policy_zone)
1105 if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
1106 return policy->v.zonelist;
1107 /*FALL THROUGH*/
1108 case MPOL_INTERLEAVE: /* should not happen */
1109 case MPOL_DEFAULT:
1110 nd = numa_node_id();
1111 break;
1112 default:
1113 nd = 0;
1114 BUG();
1116 return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
1119 /* Do dynamic interleaving for a process */
1120 static unsigned interleave_nodes(struct mempolicy *policy)
1122 unsigned nid, next;
1123 struct task_struct *me = current;
1125 nid = me->il_next;
1126 next = next_node(nid, policy->v.nodes);
1127 if (next >= MAX_NUMNODES)
1128 next = first_node(policy->v.nodes);
1129 me->il_next = next;
1130 return nid;
1134 * Depending on the memory policy provide a node from which to allocate the
1135 * next slab entry.
1137 unsigned slab_node(struct mempolicy *policy)
1139 switch (policy->policy) {
1140 case MPOL_INTERLEAVE:
1141 return interleave_nodes(policy);
1143 case MPOL_BIND:
1145 * Follow bind policy behavior and start allocation at the
1146 * first node.
1148 return policy->v.zonelist->zones[0]->zone_pgdat->node_id;
1150 case MPOL_PREFERRED:
1151 if (policy->v.preferred_node >= 0)
1152 return policy->v.preferred_node;
1153 /* Fall through */
1155 default:
1156 return numa_node_id();
1160 /* Do static interleaving for a VMA with known offset. */
1161 static unsigned offset_il_node(struct mempolicy *pol,
1162 struct vm_area_struct *vma, unsigned long off)
1164 unsigned nnodes = nodes_weight(pol->v.nodes);
1165 unsigned target = (unsigned)off % nnodes;
1166 int c;
1167 int nid = -1;
1169 c = 0;
1170 do {
1171 nid = next_node(nid, pol->v.nodes);
1172 c++;
1173 } while (c <= target);
1174 return nid;
1177 /* Determine a node number for interleave */
1178 static inline unsigned interleave_nid(struct mempolicy *pol,
1179 struct vm_area_struct *vma, unsigned long addr, int shift)
1181 if (vma) {
1182 unsigned long off;
1185 * for small pages, there is no difference between
1186 * shift and PAGE_SHIFT, so the bit-shift is safe.
1187 * for huge pages, since vm_pgoff is in units of small
1188 * pages, we need to shift off the always 0 bits to get
1189 * a useful offset.
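 *
 * Worked example (assuming 4k base pages and 2MB huge pages, i.e.
 * PAGE_SHIFT == 12 and shift == 21): vm_pgoff is counted in 4k units,
 * so ">> 9" turns it into a huge page index, and
 * (addr - vma->vm_start) >> 21 adds how many huge pages into the VMA
 * we are.  The interleave node therefore advances once per huge page
 * rather than once per base page.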
1191 BUG_ON(shift < PAGE_SHIFT);
1192 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1193 off += (addr - vma->vm_start) >> shift;
1194 return offset_il_node(pol, vma, off);
1195 } else
1196 return interleave_nodes(pol);
1199 #ifdef CONFIG_HUGETLBFS
1200 /* Return a zonelist suitable for a huge page allocation. */
1201 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
1203 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1205 if (pol->policy == MPOL_INTERLEAVE) {
1206 unsigned nid;
1208 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
1209 return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER);
1211 return zonelist_policy(GFP_HIGHUSER, pol);
1213 #endif
1215 /* Allocate a page in interleaved policy.
1216 Own path because it needs to do special accounting. */
1217 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1218 unsigned nid)
1220 struct zonelist *zl;
1221 struct page *page;
1223 zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
1224 page = __alloc_pages(gfp, order, zl);
1225 if (page && page_zone(page) == zl->zones[0])
1226 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1227 return page;
1231 * alloc_page_vma - Allocate a page for a VMA.
1233 * @gfp:
1234 * %GFP_USER user allocation.
1235 * %GFP_KERNEL kernel allocations,
1236 * %GFP_HIGHMEM highmem/user allocations,
1237 * %GFP_FS allocation should not call back into a file system.
1238 * %GFP_ATOMIC don't sleep.
1240 * @vma: Pointer to VMA or NULL if not available.
1241 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1243 * This function allocates a page from the kernel page pool and applies
1244 * a NUMA policy associated with the VMA or the current process.
1245 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1246 * mm_struct of the VMA to prevent it from going away. Should be used for
1247 * all allocations for pages that will be mapped into
1248 * user space. Returns NULL when no page can be allocated.
1250 * Should be called with the mmap_sem of the vma held.
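 *
 * Typical call pattern (a sketch only; the caller holds the mmap_sem
 * for read, as in a page fault path):
 *
 *	page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, address);
 *	if (!page)
 *		goto oom;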
1252 struct page *
1253 alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1255 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1257 cpuset_update_task_memory_state();
1259 if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
1260 unsigned nid;
1262 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
1263 return alloc_page_interleave(gfp, 0, nid);
1265 return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol));
1269 * alloc_pages_current - Allocate pages.
1271 * @gfp:
1272 * %GFP_USER user allocation,
1273 * %GFP_KERNEL kernel allocation,
1274 * %GFP_HIGHMEM highmem allocation,
1275 * %GFP_FS don't call back into a file system.
1276 * %GFP_ATOMIC don't sleep.
1277 * @order: Power of two of allocation size in pages. 0 is a single page.
1279 * Allocate a page from the kernel page pool. When not in
1280 * interrupt context, apply the current process' NUMA policy.
1281 * Returns NULL when no page can be allocated.
1283 * Don't call cpuset_update_task_memory_state() unless
1284 * 1) it's ok to take cpuset_sem (can WAIT), and
1285 * 2) allocating for current task (not interrupt).
1287 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1289 struct mempolicy *pol = current->mempolicy;
1291 if ((gfp & __GFP_WAIT) && !in_interrupt())
1292 cpuset_update_task_memory_state();
1293 if (!pol || in_interrupt())
1294 pol = &default_policy;
1295 if (pol->policy == MPOL_INTERLEAVE)
1296 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
1297 return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
1299 EXPORT_SYMBOL(alloc_pages_current);
1302 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
1303 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
1304 * with the mems_allowed returned by cpuset_mems_allowed(). This
1305 * keeps mempolicies cpuset relative after its cpuset moves. See
1306 * further kernel/cpuset.c update_nodemask().
1308 void *cpuset_being_rebound;
1310 /* Slow path of a mempolicy copy */
1311 struct mempolicy *__mpol_copy(struct mempolicy *old)
1313 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1315 if (!new)
1316 return ERR_PTR(-ENOMEM);
1317 if (current_cpuset_is_being_rebound()) {
1318 nodemask_t mems = cpuset_mems_allowed(current);
1319 mpol_rebind_policy(old, &mems);
1321 *new = *old;
1322 atomic_set(&new->refcnt, 1);
1323 if (new->policy == MPOL_BIND) {
1324 int sz = ksize(old->v.zonelist);
1325 new->v.zonelist = kmalloc(sz, SLAB_KERNEL);
1326 if (!new->v.zonelist) {
1327 kmem_cache_free(policy_cache, new);
1328 return ERR_PTR(-ENOMEM);
1330 memcpy(new->v.zonelist, old->v.zonelist, sz);
1332 return new;
1335 /* Slow path of a mempolicy comparison */
1336 int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1338 if (!a || !b)
1339 return 0;
1340 if (a->policy != b->policy)
1341 return 0;
1342 switch (a->policy) {
1343 case MPOL_DEFAULT:
1344 return 1;
1345 case MPOL_INTERLEAVE:
1346 return nodes_equal(a->v.nodes, b->v.nodes);
1347 case MPOL_PREFERRED:
1348 return a->v.preferred_node == b->v.preferred_node;
1349 case MPOL_BIND: {
1350 int i;
1351 for (i = 0; a->v.zonelist->zones[i]; i++)
1352 if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
1353 return 0;
1354 return b->v.zonelist->zones[i] == NULL;
1356 default:
1357 BUG();
1358 return 0;
1362 /* Slow path of a mpol destructor. */
1363 void __mpol_free(struct mempolicy *p)
1365 if (!atomic_dec_and_test(&p->refcnt))
1366 return;
1367 if (p->policy == MPOL_BIND)
1368 kfree(p->v.zonelist);
1369 p->policy = MPOL_DEFAULT;
1370 kmem_cache_free(policy_cache, p);
1374 * Shared memory backing store policy support.
1376 * Remember policies even when nobody has shared memory mapped.
1377 * The policies are kept in a Red-Black tree linked from the inode.
1378 * They are protected by the sp->lock spinlock, which should be held
1379 * for any accesses to the tree.
1382 /* lookup first element intersecting start-end */
1383 /* Caller holds sp->lock */
1384 static struct sp_node *
1385 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1387 struct rb_node *n = sp->root.rb_node;
1389 while (n) {
1390 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1392 if (start >= p->end)
1393 n = n->rb_right;
1394 else if (end <= p->start)
1395 n = n->rb_left;
1396 else
1397 break;
1399 if (!n)
1400 return NULL;
1401 for (;;) {
1402 struct sp_node *w = NULL;
1403 struct rb_node *prev = rb_prev(n);
1404 if (!prev)
1405 break;
1406 w = rb_entry(prev, struct sp_node, nd);
1407 if (w->end <= start)
1408 break;
1409 n = prev;
1411 return rb_entry(n, struct sp_node, nd);
1414 /* Insert a new shared policy into the list. */
1415 /* Caller holds sp->lock */
1416 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1418 struct rb_node **p = &sp->root.rb_node;
1419 struct rb_node *parent = NULL;
1420 struct sp_node *nd;
1422 while (*p) {
1423 parent = *p;
1424 nd = rb_entry(parent, struct sp_node, nd);
1425 if (new->start < nd->start)
1426 p = &(*p)->rb_left;
1427 else if (new->end > nd->end)
1428 p = &(*p)->rb_right;
1429 else
1430 BUG();
1432 rb_link_node(&new->nd, parent, p);
1433 rb_insert_color(&new->nd, &sp->root);
1434 PDprintk("inserting %lx-%lx: %d\n", new->start, new->end,
1435 new->policy ? new->policy->policy : 0);
1438 /* Find shared policy intersecting idx */
1439 struct mempolicy *
1440 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1442 struct mempolicy *pol = NULL;
1443 struct sp_node *sn;
1445 if (!sp->root.rb_node)
1446 return NULL;
1447 spin_lock(&sp->lock);
1448 sn = sp_lookup(sp, idx, idx+1);
1449 if (sn) {
1450 mpol_get(sn->policy);
1451 pol = sn->policy;
1453 spin_unlock(&sp->lock);
1454 return pol;
1457 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1459 PDprintk("deleting %lx-%lx\n", n->start, n->end);
1460 rb_erase(&n->nd, &sp->root);
1461 mpol_free(n->policy);
1462 kmem_cache_free(sn_cache, n);
1465 struct sp_node *
1466 sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol)
1468 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1470 if (!n)
1471 return NULL;
1472 n->start = start;
1473 n->end = end;
1474 mpol_get(pol);
1475 n->policy = pol;
1476 return n;
1479 /* Replace a policy range. */
1480 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1481 unsigned long end, struct sp_node *new)
1483 struct sp_node *n, *new2 = NULL;
1485 restart:
1486 spin_lock(&sp->lock);
1487 n = sp_lookup(sp, start, end);
1488 /* Take care of old policies in the same range. */
1489 while (n && n->start < end) {
1490 struct rb_node *next = rb_next(&n->nd);
1491 if (n->start >= start) {
1492 if (n->end <= end)
1493 sp_delete(sp, n);
1494 else
1495 n->start = end;
1496 } else {
1497 /* Old policy spanning whole new range. */
1498 if (n->end > end) {
1499 if (!new2) {
1500 spin_unlock(&sp->lock);
1501 new2 = sp_alloc(end, n->end, n->policy);
1502 if (!new2)
1503 return -ENOMEM;
1504 goto restart;
1506 n->end = start;
1507 sp_insert(sp, new2);
1508 new2 = NULL;
1509 break;
1510 } else
1511 n->end = start;
1513 if (!next)
1514 break;
1515 n = rb_entry(next, struct sp_node, nd);
1517 if (new)
1518 sp_insert(sp, new);
1519 spin_unlock(&sp->lock);
1520 if (new2) {
1521 mpol_free(new2->policy);
1522 kmem_cache_free(sn_cache, new2);
1524 return 0;
1527 void mpol_shared_policy_init(struct shared_policy *info, int policy,
1528 nodemask_t *policy_nodes)
1530 info->root = RB_ROOT;
1531 spin_lock_init(&info->lock);
1533 if (policy != MPOL_DEFAULT) {
1534 struct mempolicy *newpol;
1536 /* Falls back to MPOL_DEFAULT on any error */
1537 newpol = mpol_new(policy, policy_nodes);
1538 if (!IS_ERR(newpol)) {
1539 /* Create pseudo-vma that contains just the policy */
1540 struct vm_area_struct pvma;
1542 memset(&pvma, 0, sizeof(struct vm_area_struct));
1543 /* Policy covers entire file */
1544 pvma.vm_end = TASK_SIZE;
1545 mpol_set_shared_policy(info, &pvma, newpol);
1546 mpol_free(newpol);
1551 int mpol_set_shared_policy(struct shared_policy *info,
1552 struct vm_area_struct *vma, struct mempolicy *npol)
1554 int err;
1555 struct sp_node *new = NULL;
1556 unsigned long sz = vma_pages(vma);
1558 PDprintk("set_shared_policy %lx sz %lu %d %lx\n",
1559 vma->vm_pgoff,
1560 sz, npol? npol->policy : -1,
1561 npol ? nodes_addr(npol->v.nodes)[0] : -1);
1563 if (npol) {
1564 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1565 if (!new)
1566 return -ENOMEM;
1568 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1569 if (err && new)
1570 kmem_cache_free(sn_cache, new);
1571 return err;
1574 /* Free a backing policy store on inode delete. */
1575 void mpol_free_shared_policy(struct shared_policy *p)
1577 struct sp_node *n;
1578 struct rb_node *next;
1580 if (!p->root.rb_node)
1581 return;
1582 spin_lock(&p->lock);
1583 next = rb_first(&p->root);
1584 while (next) {
1585 n = rb_entry(next, struct sp_node, nd);
1586 next = rb_next(&n->nd);
1587 rb_erase(&n->nd, &p->root);
1588 mpol_free(n->policy);
1589 kmem_cache_free(sn_cache, n);
1591 spin_unlock(&p->lock);
1594 /* assumes fs == KERNEL_DS */
1595 void __init numa_policy_init(void)
1597 policy_cache = kmem_cache_create("numa_policy",
1598 sizeof(struct mempolicy),
1599 0, SLAB_PANIC, NULL, NULL);
1601 sn_cache = kmem_cache_create("shared_policy_node",
1602 sizeof(struct sp_node),
1603 0, SLAB_PANIC, NULL, NULL);
1605 /* Set interleaving policy for system init. This way not all
1606 the data structures allocated at system boot end up in node zero. */
1608 if (do_set_mempolicy(MPOL_INTERLEAVE, &node_online_map))
1609 printk(KERN_ERR "numa_policy_init: interleaving failed\n");
1612 /* Reset policy of current process to default */
1613 void numa_default_policy(void)
1615 do_set_mempolicy(MPOL_DEFAULT, NULL);
1618 /* Migrate a policy to a different set of nodes */
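/*
 * Worked example (masks made up): a task whose MPOL_INTERLEAVE policy
 * covers nodes {0,1} and whose cpuset is moved from mems {0,1} to mems
 * {4,5} has its interleave mask remapped to {4,5}; an MPOL_PREFERRED
 * policy on node 1 would likewise be remapped to node 5.
 */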
1619 void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
1621 nodemask_t *mpolmask;
1622 nodemask_t tmp;
1624 if (!pol)
1625 return;
1626 mpolmask = &pol->cpuset_mems_allowed;
1627 if (nodes_equal(*mpolmask, *newmask))
1628 return;
1630 switch (pol->policy) {
1631 case MPOL_DEFAULT:
1632 break;
1633 case MPOL_INTERLEAVE:
1634 nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
1635 pol->v.nodes = tmp;
1636 *mpolmask = *newmask;
1637 current->il_next = node_remap(current->il_next,
1638 *mpolmask, *newmask);
1639 break;
1640 case MPOL_PREFERRED:
1641 pol->v.preferred_node = node_remap(pol->v.preferred_node,
1642 *mpolmask, *newmask);
1643 *mpolmask = *newmask;
1644 break;
1645 case MPOL_BIND: {
1646 nodemask_t nodes;
1647 struct zone **z;
1648 struct zonelist *zonelist;
1650 nodes_clear(nodes);
1651 for (z = pol->v.zonelist->zones; *z; z++)
1652 node_set((*z)->zone_pgdat->node_id, nodes);
1653 nodes_remap(tmp, nodes, *mpolmask, *newmask);
1654 nodes = tmp;
1656 zonelist = bind_zonelist(&nodes);
1658 /* If no mem, then zonelist is NULL and we keep old zonelist.
1659 * If that old zonelist has no remaining mems_allowed nodes,
1660 * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
1663 if (zonelist) {
1664 /* Good - got mem - substitute new zonelist */
1665 kfree(pol->v.zonelist);
1666 pol->v.zonelist = zonelist;
1668 *mpolmask = *newmask;
1669 break;
1671 default:
1672 BUG();
1673 break;
1678 * Wrapper for mpol_rebind_policy() that just requires task
1679 * pointer, and updates task mempolicy.
1682 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
1684 mpol_rebind_policy(tsk->mempolicy, new);
1688 * Rebind each vma in mm to new nodemask.
1690 * Call holding a reference to mm. Takes mm->mmap_sem during call.
1693 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
1695 struct vm_area_struct *vma;
1697 down_write(&mm->mmap_sem);
1698 for (vma = mm->mmap; vma; vma = vma->vm_next)
1699 mpol_rebind_policy(vma->vm_policy, new);
1700 up_write(&mm->mmap_sem);
1704 * Display pages allocated per node and memory policy via /proc.
1707 static const char *policy_types[] = { "default", "prefer", "bind",
1708 "interleave" };
1711 * Convert a mempolicy into a string.
1712 * Returns the number of characters in buffer (if positive)
1713 * or an error (negative)
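 *
 * Example results (node lists made up): "default", "prefer=2",
 * "bind=0-1", "interleave=0-3".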
1715 static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1717 char *p = buffer;
1718 int l;
1719 nodemask_t nodes;
1720 int mode = pol ? pol->policy : MPOL_DEFAULT;
1722 switch (mode) {
1723 case MPOL_DEFAULT:
1724 nodes_clear(nodes);
1725 break;
1727 case MPOL_PREFERRED:
1728 nodes_clear(nodes);
1729 node_set(pol->v.preferred_node, nodes);
1730 break;
1732 case MPOL_BIND:
1733 get_zonemask(pol, &nodes);
1734 break;
1736 case MPOL_INTERLEAVE:
1737 nodes = pol->v.nodes;
1738 break;
1740 default:
1741 BUG();
1742 return -EFAULT;
1745 l = strlen(policy_types[mode]);
1746 if (buffer + maxlen < p + l + 1)
1747 return -ENOSPC;
1749 strcpy(p, policy_types[mode]);
1750 p += l;
1752 if (!nodes_empty(nodes)) {
1753 if (buffer + maxlen < p + 2)
1754 return -ENOSPC;
1755 *p++ = '=';
1756 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
1758 return p - buffer;
1761 struct numa_maps {
1762 unsigned long pages;
1763 unsigned long anon;
1764 unsigned long active;
1765 unsigned long writeback;
1766 unsigned long mapcount_max;
1767 unsigned long dirty;
1768 unsigned long swapcache;
1769 unsigned long node[MAX_NUMNODES];
1772 static void gather_stats(struct page *page, void *private, int pte_dirty)
1774 struct numa_maps *md = private;
1775 int count = page_mapcount(page);
1777 md->pages++;
1778 if (pte_dirty || PageDirty(page))
1779 md->dirty++;
1781 if (PageSwapCache(page))
1782 md->swapcache++;
1784 if (PageActive(page))
1785 md->active++;
1787 if (PageWriteback(page))
1788 md->writeback++;
1790 if (PageAnon(page))
1791 md->anon++;
1793 if (count > md->mapcount_max)
1794 md->mapcount_max = count;
1796 md->node[page_to_nid(page)]++;
1799 #ifdef CONFIG_HUGETLB_PAGE
1800 static void check_huge_range(struct vm_area_struct *vma,
1801 unsigned long start, unsigned long end,
1802 struct numa_maps *md)
1804 unsigned long addr;
1805 struct page *page;
1807 for (addr = start; addr < end; addr += HPAGE_SIZE) {
1808 pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
1809 pte_t pte;
1811 if (!ptep)
1812 continue;
1814 pte = *ptep;
1815 if (pte_none(pte))
1816 continue;
1818 page = pte_page(pte);
1819 if (!page)
1820 continue;
1822 gather_stats(page, md, pte_dirty(*ptep));
1825 #else
1826 static inline void check_huge_range(struct vm_area_struct *vma,
1827 unsigned long start, unsigned long end,
1828 struct numa_maps *md)
1831 #endif
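/*
 * show_numa_map() emits one line per VMA into /proc/<pid>/numa_maps,
 * roughly of the form (values made up for illustration):
 *
 *	00400000 default file=/bin/cat mapped=4 N0=4
 *	2aaaaaaab000 interleave=0-1 heap anon=12 dirty=12 N0=6 N1=6
 */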
1833 int show_numa_map(struct seq_file *m, void *v)
1835 struct proc_maps_private *priv = m->private;
1836 struct vm_area_struct *vma = v;
1837 struct numa_maps *md;
1838 struct file *file = vma->vm_file;
1839 struct mm_struct *mm = vma->vm_mm;
1840 int n;
1841 char buffer[50];
1843 if (!mm)
1844 return 0;
1846 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
1847 if (!md)
1848 return 0;
1850 mpol_to_str(buffer, sizeof(buffer),
1851 get_vma_policy(priv->task, vma, vma->vm_start));
1853 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1855 if (file) {
1856 seq_printf(m, " file=");
1857 seq_path(m, file->f_vfsmnt, file->f_dentry, "\n\t= ");
1858 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1859 seq_printf(m, " heap");
1860 } else if (vma->vm_start <= mm->start_stack &&
1861 vma->vm_end >= mm->start_stack) {
1862 seq_printf(m, " stack");
1865 if (is_vm_hugetlb_page(vma)) {
1866 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
1867 seq_printf(m, " huge");
1868 } else {
1869 check_pgd_range(vma, vma->vm_start, vma->vm_end,
1870 &node_online_map, MPOL_MF_STATS, md);
1873 if (!md->pages)
1874 goto out;
1876 if (md->anon)
1877 seq_printf(m," anon=%lu",md->anon);
1879 if (md->dirty)
1880 seq_printf(m," dirty=%lu",md->dirty);
1882 if (md->pages != md->anon && md->pages != md->dirty)
1883 seq_printf(m, " mapped=%lu", md->pages);
1885 if (md->mapcount_max > 1)
1886 seq_printf(m, " mapmax=%lu", md->mapcount_max);
1888 if (md->swapcache)
1889 seq_printf(m," swapcache=%lu", md->swapcache);
1891 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1892 seq_printf(m," active=%lu", md->active);
1894 if (md->writeback)
1895 seq_printf(m," writeback=%lu", md->writeback);
1897 for_each_online_node(n)
1898 if (md->node[n])
1899 seq_printf(m, " N%d=%lu", n, md->node[n]);
1900 out:
1901 seq_putc(m, '\n');
1902 kfree(md);
1904 if (m->count < m->size)
1905 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
1906 return 0;