/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <clameter@sgi.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
static struct srcu_struct srcu;
/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU, and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister, it
		 * must be handled, as it's the only way for the driver
		 * to flush all existing sptes and stop the driver
		 * from establishing any more sptes before all the
		 * pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
	srcu_read_unlock(&srcu, id);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister, so
		 * mmu_notifier_unregister will do nothing other than
		 * wait for ->release to finish before returning.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from
	 * returning to exit_mmap (which would proceed with freeing all
	 * pages in the mm) until the ->release method returns, if it was
	 * invoked by mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one
	 * mm_count reference is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}
/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
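
/*
 * Illustrative sketch (editor's addition, not part of the kernel file):
 * one plausible driver-side ->clear_flush_young implementation for
 * hardware without a young/accessed bit, per the comment above: unmap
 * the secondary mapping (spte) and report whether one existed. All
 * example_* names are hypothetical.
 */
#if 0	/* example only, never compiled */
struct example_dev {
	struct mmu_notifier mn;
	/* ... secondary-MMU state ... */
};

static int example_clear_flush_young(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long address)
{
	struct example_dev *dev = container_of(mn, struct example_dev, mn);

	/*
	 * Hypothetical helper: tear down the device mapping for this
	 * address and return 1 if one was present, 0 otherwise.
	 */
	return example_dev_zap_spte(dev, address);
}
#endif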
int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
		/*
		 * Some drivers don't have change_pte,
		 * so we must call invalidate_page in that case.
		 */
		else if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);
}
void __mmu_notifier_invalidate_page(struct mm_struct *mm,
				    unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);
}
void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
					   unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
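
/*
 * Illustrative sketch (editor's addition, not part of the kernel file):
 * core mm code is expected to bracket teardown of a virtual address
 * range with the start/end pair above, so a driver can stop secondary
 * MMU faults on the range in ->invalidate_range_start and resume them
 * in ->invalidate_range_end. A hypothetical caller:
 */
#if 0	/* example only, never compiled */
static void example_zap_range(struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
	mmu_notifier_invalidate_range_start(mm, start, end);
	/* ... clear and flush the primary page tables in [start, end) ... */
	mmu_notifier_invalidate_range_end(mm, start, end);
}
#endif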
static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Verify that mmu_notifier_init() has already run and the global
	 * srcu is initialized.
	 */
	BUG_ON(!srcu.per_cpu_ref);

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out;

	if (!mm_has_notifiers(mm)) {
		mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
					  GFP_KERNEL);
		if (unlikely(!mmu_notifier_mm)) {
			ret = -ENOMEM;
			goto out_of_mem;
		}
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

out_of_mem:
	mm_drop_all_locks(mm);
out:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);

	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}
/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
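
/*
 * Illustrative sketch (editor's addition, not part of the kernel file):
 * typical registration by a driver, reusing the hypothetical
 * example_dev from the sketch further above. current->mm satisfies the
 * mm_users requirement described in the comment above.
 */
#if 0	/* example only, never compiled */
static void example_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* flush all sptes; the mm's pages are about to be freed */
}

static const struct mmu_notifier_ops example_ops = {
	.release		= example_release,
	.clear_flush_young	= example_clear_flush_young,
};

static int example_attach(struct example_dev *dev)
{
	dev->mn.ops = &example_ops;
	/* on success mm_count stays pinned until mmu_notifier_unregister */
	return mmu_notifier_register(&dev->mn, current->mm);
}
#endif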
/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);
/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}
/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we guaranteed
 * that ->release or any other method can no longer run.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release
		 * to finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to
		 * guarantee ->release is called before freeing the
		 * pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		hlist_del_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
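
/*
 * Illustrative sketch (editor's addition, not part of the kernel file):
 * teardown matching the hypothetical registration example above.
 * mmu_notifier_unregister drops the mm_count pin taken at register
 * time, so it is safe to call before or after exit_mmap; the driver
 * must have stopped relying on its sptes beforehand.
 */
#if 0	/* example only, never compiled */
static void example_detach(struct example_dev *dev, struct mm_struct *mm)
{
	/* after this returns, no notifier method can still be running */
	mmu_notifier_unregister(&dev->mn, mm);
}
#endif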
static int __init mmu_notifier_init(void)
{
	return init_srcu_struct(&srcu);
}

module_init(mmu_notifier_init);