#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/thread_info.h>
#include <linux/linkage.h>
#include <linux/list.h>

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
  extern void add_preempt_count(int val);
  extern void sub_preempt_count(int val);
#else
# define add_preempt_count(val)	do { preempt_count() += (val); } while (0)
# define sub_preempt_count(val)	do { preempt_count() -= (val); } while (0)
#endif

#define inc_preempt_count() add_preempt_count(1)
#define dec_preempt_count() sub_preempt_count(1)

#define preempt_count()	(current_thread_info()->preempt_count)

#ifdef CONFIG_PREEMPT

asmlinkage void preempt_schedule(void);

#define preempt_check_resched() \
do { \
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
		preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPT */

#define preempt_check_resched()		do { } while (0)

#endif /* CONFIG_PREEMPT */

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	inc_preempt_count(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched() \
do { \
	barrier(); \
	dec_preempt_count(); \
} while (0)

#define preempt_enable() \
do { \
	preempt_enable_no_resched(); \
	barrier(); \
	preempt_check_resched(); \
} while (0)

/* For debugging and tracer internals only! */
#define add_preempt_count_notrace(val) \
	do { preempt_count() += (val); } while (0)
#define sub_preempt_count_notrace(val) \
	do { preempt_count() -= (val); } while (0)
#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)

#define preempt_disable_notrace() \
do { \
	inc_preempt_count_notrace(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	dec_preempt_count_notrace(); \
} while (0)

/* preempt_check_resched is OK to trace */
#define preempt_enable_notrace() \
do { \
	preempt_enable_no_resched_notrace(); \
	barrier(); \
	preempt_check_resched(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

#define preempt_disable()		do { } while (0)
#define preempt_enable_no_resched()	do { } while (0)
#define preempt_enable()		do { } while (0)

#define preempt_disable_notrace()		do { } while (0)
#define preempt_enable_no_resched_notrace()	do { } while (0)
#define preempt_enable_notrace()		do { } while (0)

#endif /* CONFIG_PREEMPT_COUNT */

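/*
 * Usage sketch (illustrative, not part of the original header): a
 * hypothetical caller bumps a per-CPU counter with preemption disabled
 * so the task cannot migrate to another CPU mid-update.  The names
 * my_counter and my_bump() are made up for this example; DEFINE_PER_CPU
 * and __this_cpu_inc() come from <linux/percpu.h>.
 *
 *	static DEFINE_PER_CPU(int, my_counter);
 *
 *	static void my_bump(void)
 *	{
 *		preempt_disable();
 *		__this_cpu_inc(my_counter);
 *		preempt_enable();
 *	}
 *
 * preempt_disable()/preempt_enable() nest: each disable increments
 * preempt_count and each enable decrements it; the task becomes
 * preemptible again (and preempt_enable() may end up calling
 * preempt_schedule()) only once the count drops back to zero.
 */
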
#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}

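/*
 * Usage sketch (illustrative, not part of the original header): the
 * notifier is typically embedded in a caller-owned object, recovered in
 * the callbacks with container_of(), and registered by the task that
 * wants the callbacks.  The names my_ctx, my_sched_in, my_sched_out and
 * my_preempt_ops are made up for this example.
 *
 *	struct my_ctx {
 *		struct preempt_notifier notifier;
 *		int last_cpu;
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		struct my_ctx *ctx = container_of(pn, struct my_ctx, notifier);
 *
 *		ctx->last_cpu = cpu;
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{
 *		struct my_ctx *ctx = container_of(pn, struct my_ctx, notifier);
 *
 *		ctx->last_cpu = -1;
 *	}
 *
 *	static struct preempt_ops my_preempt_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 * From the task that wants notifications:
 *
 *	preempt_notifier_init(&ctx->notifier, &my_preempt_ops);
 *	preempt_notifier_register(&ctx->notifier);
 *	preempt_notifier_unregister(&ctx->notifier);
 *
 * Remember that sched_out runs with the rq lock held and irqs disabled,
 * so the callback must stay short and must not sleep.
 */
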
#endif /* CONFIG_PREEMPT_NOTIFIERS */

#endif /* __LINUX_PREEMPT_H */