mm/vmacache.c

/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
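
/*
 * For context, a minimal sketch of the cache geometry this file leans
 * on, as I read the era's <linux/vmacache.h> and <linux/sched.h>; the
 * exact values and field names below are assumptions, not part of
 * this file:
 *
 *      #define VMACACHE_BITS 2
 *      #define VMACACHE_SIZE (1U << VMACACHE_BITS)
 *      #define VMACACHE_MASK (VMACACHE_SIZE - 1)
 *
 *      // Hash on the page number, so nearby addresses tend to land
 *      // in (and later hit) the same slot.
 *      #define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
 *
 * with the per-task cache kept in task_struct:
 *
 *      struct vm_area_struct *vmacache[VMACACHE_SIZE];
 *      u32 vmacache_seqnum;    // compared against mm->vmacache_seqnum
 */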

/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
        struct task_struct *g, *p;

        /*
         * Single threaded tasks need not iterate the entire
         * process list. They can skip the flushing as well:
         * the mm's seqnum was already increased, and there
         * are no other threads whose seqnums could matter.
         * Current's flush will occur upon the next lookup.
         */
        if (atomic_read(&mm->mm_users) == 1)
                return;

        rcu_read_lock();
        for_each_process_thread(g, p) {
                /*
                 * Only flush the vmacache pointers as the
                 * mm's seqnum is already set; each thread's
                 * own seqnum is refreshed when its next
                 * lookup notices the mismatch.
                 */
                if (mm == p->mm)
                        vmacache_flush(p);
        }
        rcu_read_unlock();
}
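
/*
 * vmacache_flush() is not defined in this file; a minimal sketch,
 * assuming the static inline from the era's <linux/vmacache.h>, which
 * simply zeroes the per-task slot array:
 *
 *      static inline void vmacache_flush(struct task_struct *tsk)
 *      {
 *              memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
 *      }
 *
 * NULL slots are treated as empty by the lookup loops below, so a
 * flush never needs to touch any seqnum.
 */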

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma(). The vmacache is task-local and this
 * task's vmacache pertains to a different mm (ie, its own). There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via
 * use_mm(). That kernel thread's vmacache is not applicable to this mm.
 */
static bool vmacache_valid_mm(struct mm_struct *mm)
{
        return current->mm == mm && !(current->flags & PF_KTHREAD);
}

void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
        if (vmacache_valid_mm(newvma->vm_mm))
                current->vmacache[VMACACHE_HASH(addr)] = newvma;
}

static bool vmacache_valid(struct mm_struct *mm)
{
        struct task_struct *curr;

        if (!vmacache_valid_mm(mm))
                return false;

        curr = current;
        if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
                /*
                 * First attempt will always be invalid, initialize
                 * the new cache for this task here.
                 */
                curr->vmacache_seqnum = mm->vmacache_seqnum;
                vmacache_flush(curr);
                return false;
        }
        return true;
}
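
/*
 * The writer side that makes the seqnum comparison above work is
 * vmacache_invalidate(), called whenever the mm's vma set changes. A
 * minimal sketch, assuming the <linux/vmacache.h> inline of the same
 * era:
 *
 *      static inline void vmacache_invalidate(struct mm_struct *mm)
 *      {
 *              mm->vmacache_seqnum++;
 *
 *              // On u32 overflow the new seqnum could collide with a
 *              // task's stale one, so fall back to flushing every
 *              // thread that shares this mm.
 *              if (unlikely(mm->vmacache_seqnum == 0))
 *                      vmacache_flush_all(mm);
 *      }
 */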

struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
        int i;

        if (!vmacache_valid(mm))
                return NULL;

        count_vm_vmacache_event(VMACACHE_FIND_CALLS);

        for (i = 0; i < VMACACHE_SIZE; i++) {
                struct vm_area_struct *vma = current->vmacache[i];

                if (!vma)
                        continue;
                if (WARN_ON_ONCE(vma->vm_mm != mm))
                        break;
                if (vma->vm_start <= addr && vma->vm_end > addr) {
                        count_vm_vmacache_event(VMACACHE_FIND_HITS);
                        return vma;
                }
        }

        return NULL;
}
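
/*
 * Typical usage, modeled loosely on find_vma() in mm/mmap.c of the
 * same era (a simplified sketch, not the exact mainline code; the
 * names lookup() and rbtree_walk() are hypothetical): check the cache
 * first, fall back to the full tree walk, and populate the cache on
 * the way out so the next nearby lookup hits:
 *
 *      struct vm_area_struct *lookup(struct mm_struct *mm, unsigned long addr)
 *      {
 *              struct vm_area_struct *vma;
 *
 *              vma = vmacache_find(mm, addr);          // O(1) fast path
 *              if (likely(vma))
 *                      return vma;
 *
 *              vma = rbtree_walk(mm, addr);            // slow path (hypothetical)
 *              if (vma)
 *                      vmacache_update(addr, vma);     // cache for next time
 *              return vma;
 *      }
 */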

#ifndef CONFIG_MMU
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
        int i;

        if (!vmacache_valid(mm))
                return NULL;

        count_vm_vmacache_event(VMACACHE_FIND_CALLS);

        for (i = 0; i < VMACACHE_SIZE; i++) {
                struct vm_area_struct *vma = current->vmacache[i];

                if (vma && vma->vm_start == start && vma->vm_end == end) {
                        count_vm_vmacache_event(VMACACHE_FIND_HITS);
                        return vma;
                }
        }

        return NULL;
}
#endif