1 #include <linux/context_tracking.h>
2 #include <linux/rcupdate.h>
3 #include <linux/sched.h>
4 #include <linux/percpu.h>
5 #include <linux/hardirq.h>
7 struct context_tracking
{
9 * When active is false, hooks are not set to
10 * minimize overhead: TIF flags are cleared
11 * and calls to user_enter/exit are ignored. This
12 * may be further optimized using static keys.
21 static DEFINE_PER_CPU(struct context_tracking
, context_tracking
) = {
22 #ifdef CONFIG_CONTEXT_TRACKING_FORCE
32 * Some contexts may involve an exception occurring in an irq,
33 * leading to that nesting:
34 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
35 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
36 * helpers are enough to protect RCU uses inside the exception. So
37 * just return immediately if we detect we are in an IRQ.
42 WARN_ON_ONCE(!current
->mm
);
44 local_irq_save(flags
);
45 if (__this_cpu_read(context_tracking
.active
) &&
46 __this_cpu_read(context_tracking
.state
) != IN_USER
) {
47 __this_cpu_write(context_tracking
.state
, IN_USER
);
50 local_irq_restore(flags
);
58 * Some contexts may involve an exception occurring in an irq,
59 * leading to that nesting:
60 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
61 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
62 * helpers are enough to protect RCU uses inside the exception. So
63 * just return immediately if we detect we are in an IRQ.
68 local_irq_save(flags
);
69 if (__this_cpu_read(context_tracking
.state
) == IN_USER
) {
70 __this_cpu_write(context_tracking
.state
, IN_KERNEL
);
73 local_irq_restore(flags
);
76 void context_tracking_task_switch(struct task_struct
*prev
,
77 struct task_struct
*next
)
79 if (__this_cpu_read(context_tracking
.active
)) {
80 clear_tsk_thread_flag(prev
, TIF_NOHZ
);
81 set_tsk_thread_flag(next
, TIF_NOHZ
);