/* SPDX-License-Identifier: GPL-2.0 */
/* Freezer declarations */

#ifndef FREEZER_H_INCLUDED
#define FREEZER_H_INCLUDED

#include <linux/debug_locks.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_FREEZER
extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
extern bool pm_freezing;		/* PM freezing in effect */
extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */

/*
 * Timeout for stopping processes
 */
extern unsigned int freeze_timeout_msecs;

/*
 * Check if a process has been frozen
 */
static inline bool frozen(struct task_struct *p)
{
	return p->flags & PF_FROZEN;
}

extern bool freezing_slow_path(struct task_struct *p);

/*
 * Check if there is a request to freeze a process
 */
static inline bool freezing(struct task_struct *p)
{
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;

	return freezing_slow_path(p);
}

/* Takes and releases task alloc lock using task_lock() */
extern void __thaw_task(struct task_struct *t);

extern bool __refrigerator(bool check_kthr_stop);
extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);

/*
 * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
 * If try_to_freeze causes a lockdep warning it means the caller may deadlock
 */
static inline bool try_to_freeze_unsafe(void)
{
	might_sleep();

	if (likely(!freezing(current)))
		return false;

	return __refrigerator(false);
}

static inline bool try_to_freeze(void)
{
	if (!(current->flags & PF_NOFREEZE))
		debug_check_no_locks_held();
	return try_to_freeze_unsafe();
}

extern bool freeze_task(struct task_struct *p);
extern bool set_freezable(void);
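
/*
 * Illustrative sketch (not part of this header): a freezable kernel thread
 * typically marks itself freezable with set_freezable() and then calls
 * try_to_freeze() at a safe point in its main loop, roughly:
 *
 *	static int my_kthread(void *data)	// hypothetical thread function
 *	{
 *		set_freezable();
 *		while (!kthread_should_stop()) {
 *			try_to_freeze();	// park here while the system suspends
 *			do_work();		// hypothetical work item
 *		}
 *		return 0;
 *	}
 *
 * my_kthread() and do_work() are made-up names used only for illustration.
 */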

#ifdef CONFIG_CGROUP_FREEZER
extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
static inline bool cgroup_freezing(struct task_struct *task)
{
	return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */

/*
 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
 * calls wait_for_completion(&vfork) and reset right after it returns from this
 * function.  Next, the parent should call try_to_freeze() to freeze itself
 * appropriately in case the child has exited before the freezing of tasks is
 * complete.  However, we don't want kernel threads to be frozen in unexpected
 * places, so we allow them to block freeze_processes() instead or to set
 * PF_NOFREEZE if needed.  Fortunately, in the ____call_usermodehelper() case the
 * parent won't really block freeze_processes(), since ____call_usermodehelper()
 * (the child) does a little before exec/exit and it can't be frozen before
 * waking up the parent.
 */

/**
 * freezer_do_not_count - tell freezer to ignore %current
 *
 * Tell freezers to ignore the current task when determining whether the
 * target frozen state is reached.  IOW, the current task will be
 * considered frozen enough by freezers.
 *
 * The caller shouldn't do anything which isn't allowed for a frozen task
 * until freezer_count() is called.  Usually, freezer[_do_not]_count() pair
 * wrap a scheduling operation and nothing much else.
 */
static inline void freezer_do_not_count(void)
{
	current->flags |= PF_FREEZER_SKIP;
}

/**
 * freezer_count - tell freezer to stop ignoring %current
 *
 * Undo freezer_do_not_count().  It tells freezers that %current should be
 * considered again and tries to freeze if freezing condition is already in
 * effect.
 */
static inline void freezer_count(void)
{
	current->flags &= ~PF_FREEZER_SKIP;

	/*
	 * If freezing is in progress, the following paired with smp_mb()
	 * in freezer_should_skip() ensures that either we see %true
	 * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
	 */
	smp_mb();

	try_to_freeze();
}
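
/*
 * Illustrative sketch (not part of this header): freezer_do_not_count() and
 * freezer_count() are meant to tightly wrap a single blocking call, for
 * example a vfork-style wait on a completion:
 *
 *	freezer_do_not_count();
 *	wait_for_completion(&vfork);	// parent sleeps, counted as frozen enough
 *	freezer_count();		// re-check freezing() and freeze if needed
 *
 * Anything beyond the scheduling operation itself should happen outside the
 * pair, as described in the kernel-doc above.
 */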

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezer_count_unsafe(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	smp_mb();
	try_to_freeze_unsafe();
}

/**
 * freezer_should_skip - whether to skip a task when determining frozen
 *			 state is reached
 * @p: task in question
 *
 * This function is used by freezers after establishing %true freezing() to
 * test whether a task should be skipped when determining the target frozen
 * state is reached.  IOW, if this function returns %true, @p is considered
 * frozen enough.
 */
static inline bool freezer_should_skip(struct task_struct *p)
{
	/*
	 * The following smp_mb() paired with the one in freezer_count()
	 * ensures that either freezer_count() sees %true freezing() or we
	 * see cleared %PF_FREEZER_SKIP and return %false.  This makes it
	 * impossible for a task to slip the frozen state testing after
	 * clearing %PF_FREEZER_SKIP.
	 */
	smp_mb();
	return p->flags & PF_FREEZER_SKIP;
}
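
/*
 * Illustrative sketch (not part of this header): a freezer loop (such as the
 * one behind freeze_processes()) might treat a task as done once it is either
 * frozen or has asked to be skipped, roughly:
 *
 *	if (frozen(p) || freezer_should_skip(p))
 *		continue;		// p is frozen enough
 *	todo++;				// p still needs to be frozen
 *
 * The smp_mb() pairing described above is what keeps this test from racing
 * with freezer_count() clearing PF_FREEZER_SKIP.
 */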

/*
 * These functions are intended to be used whenever you want to allow a
 * sleeping task to be frozen.  Note that none of them return any clear
 * indication of whether a freeze event happened while in this function.
 */

/* Like schedule(), but should not block the freezer. */
static inline void freezable_schedule(void)
{
	freezer_do_not_count();
	schedule();
	freezer_count();
}

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezable_schedule_unsafe(void)
{
	freezer_do_not_count();
	schedule();
	freezer_count_unsafe();
}

/*
 * Like schedule_timeout(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline long freezable_schedule_timeout(long timeout)
{
	long __retval;

	freezer_do_not_count();
	__retval = schedule_timeout(timeout);
	freezer_count();
	return __retval;
}

/*
 * Like schedule_timeout_interruptible(), but should not block the freezer.
 * Do not call this with locks held.
 */
static inline long freezable_schedule_timeout_interruptible(long timeout)
{
	long __retval;

	freezer_do_not_count();
	__retval = schedule_timeout_interruptible(timeout);
	freezer_count();
	return __retval;
}

/* Like schedule_timeout_killable(), but should not block the freezer. */
static inline long freezable_schedule_timeout_killable(long timeout)
{
	long __retval;

	freezer_do_not_count();
	__retval = schedule_timeout_killable(timeout);
	freezer_count();
	return __retval;
}

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
{
	long __retval;

	freezer_do_not_count();
	__retval = schedule_timeout_killable(timeout);
	freezer_count_unsafe();
	return __retval;
}

/*
 * Like schedule_hrtimeout_range(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
		u64 delta, const enum hrtimer_mode mode)
{
	int __retval;

	freezer_do_not_count();
	__retval = schedule_hrtimeout_range(expires, delta, mode);
	freezer_count();
	return __retval;
}
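
/*
 * Illustrative sketch (not part of this header): a polling kernel thread that
 * must not hold up suspend can sleep through one of the freezable_*() wrappers
 * instead of a plain schedule_timeout(), e.g.:
 *
 *	set_freezable();
 *	while (!kthread_should_stop()) {
 *		poll_device();				// hypothetical helper
 *		freezable_schedule_timeout_interruptible(HZ);
 *	}
 *
 * poll_device() is a made-up name; the point is only that the sleep goes
 * through a freezable wrapper so the freezer does not have to wait for it.
 */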

/*
 * Freezer-friendly wrappers around wait_event_interruptible(),
 * wait_event_killable() and wait_event_interruptible_timeout(), originally
 * defined in <linux/wait.h>
 */

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
#define wait_event_freezekillable_unsafe(wq, condition)		\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_killable(wq, (condition));		\
	freezer_count_unsafe();						\
	__retval;							\
})

#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
static inline void __thaw_task(struct task_struct *t) {}

static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}

static inline bool try_to_freeze_nowarn(void) { return false; }
static inline bool try_to_freeze(void) { return false; }

static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}

#define freezable_schedule()  schedule()

#define freezable_schedule_unsafe()  schedule()

#define freezable_schedule_timeout(timeout)  schedule_timeout(timeout)

#define freezable_schedule_timeout_interruptible(timeout)		\
	schedule_timeout_interruptible(timeout)

#define freezable_schedule_timeout_killable(timeout)			\
	schedule_timeout_killable(timeout)

#define freezable_schedule_timeout_killable_unsafe(timeout)		\
	schedule_timeout_killable(timeout)

#define freezable_schedule_hrtimeout_range(expires, delta, mode)	\
	schedule_hrtimeout_range(expires, delta, mode)

#define wait_event_freezekillable_unsafe(wq, condition)		\
		wait_event_killable(wq, condition)

#endif /* !CONFIG_FREEZER */

#endif	/* FREEZER_H_INCLUDED */