/*
 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/ftrace.h>
#include <trace/events/power.h>
#include <linux/compiler.h>
#include <linux/moduleparam.h>

#include "power.h"

const char * const pm_labels[] = {
	[PM_SUSPEND_FREEZE] = "freeze",
	[PM_SUSPEND_STANDBY] = "standby",
	[PM_SUSPEND_MEM] = "mem",
};
const char *pm_states[PM_SUSPEND_MAX];

static const char * const mem_sleep_labels[] = {
	[PM_SUSPEND_FREEZE] = "s2idle",
	[PM_SUSPEND_STANDBY] = "shallow",
	[PM_SUSPEND_MEM] = "deep",
};
const char *mem_sleep_states[PM_SUSPEND_MAX];

suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE;
static suspend_state_t mem_sleep_default = PM_SUSPEND_MEM;

unsigned int pm_suspend_global_flags;
EXPORT_SYMBOL_GPL(pm_suspend_global_flags);

static const struct platform_suspend_ops *suspend_ops;
static const struct platform_freeze_ops *freeze_ops;
static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);

enum freeze_state __read_mostly suspend_freeze_state;
static DEFINE_SPINLOCK(suspend_freeze_lock);

void freeze_set_ops(const struct platform_freeze_ops *ops)
{
	lock_system_sleep();
	freeze_ops = ops;
	unlock_system_sleep();
}

static void freeze_begin(void)
{
	suspend_freeze_state = FREEZE_STATE_NONE;
}

static void freeze_enter(void)
{
	trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);

	spin_lock_irq(&suspend_freeze_lock);
	if (pm_wakeup_pending())
		goto out;

	suspend_freeze_state = FREEZE_STATE_ENTER;
	spin_unlock_irq(&suspend_freeze_lock);

	get_online_cpus();
	cpuidle_resume();

	/* Push all the CPUs into the idle loop. */
	wake_up_all_idle_cpus();
	/* Make the current CPU wait so it can enter the idle loop too. */
	wait_event(suspend_freeze_wait_head,
		   suspend_freeze_state == FREEZE_STATE_WAKE);

	cpuidle_pause();
	put_online_cpus();

	spin_lock_irq(&suspend_freeze_lock);

 out:
	suspend_freeze_state = FREEZE_STATE_NONE;
	spin_unlock_irq(&suspend_freeze_lock);

	trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
}

static void s2idle_loop(void)
{
	pr_debug("PM: suspend-to-idle\n");

	do {
		freeze_enter();

		if (freeze_ops && freeze_ops->wake)
			freeze_ops->wake();

		dpm_resume_noirq(PMSG_RESUME);
		if (freeze_ops && freeze_ops->sync)
			freeze_ops->sync();

		if (pm_wakeup_pending())
			break;

		pm_wakeup_clear(false);
	} while (!dpm_suspend_noirq(PMSG_SUSPEND));

	pr_debug("PM: resume from suspend-to-idle\n");
}

void freeze_wake(void)
{
	unsigned long flags;

	spin_lock_irqsave(&suspend_freeze_lock, flags);
	if (suspend_freeze_state > FREEZE_STATE_NONE) {
		suspend_freeze_state = FREEZE_STATE_WAKE;
		wake_up(&suspend_freeze_wait_head);
	}
	spin_unlock_irqrestore(&suspend_freeze_lock, flags);
}
EXPORT_SYMBOL_GPL(freeze_wake);

static bool valid_state(suspend_state_t state)
{
	/*
	 * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low level
	 * support and need to be valid to the low level
	 * implementation, no valid callback implies that none are valid.
	 */
	return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
}

void __init pm_states_init(void)
{
	/* "mem" and "freeze" are always present in /sys/power/state. */
	pm_states[PM_SUSPEND_MEM] = pm_labels[PM_SUSPEND_MEM];
	pm_states[PM_SUSPEND_FREEZE] = pm_labels[PM_SUSPEND_FREEZE];
	/*
	 * Suspend-to-idle should be supported even without any suspend_ops,
	 * initialize mem_sleep_states[] accordingly here.
	 */
	mem_sleep_states[PM_SUSPEND_FREEZE] = mem_sleep_labels[PM_SUSPEND_FREEZE];
}

static int __init mem_sleep_default_setup(char *str)
{
	suspend_state_t state;

	for (state = PM_SUSPEND_FREEZE; state <= PM_SUSPEND_MEM; state++)
		if (mem_sleep_labels[state] &&
		    !strcmp(str, mem_sleep_labels[state])) {
			mem_sleep_default = state;
			break;
		}

	return 1;
}
__setup("mem_sleep_default=", mem_sleep_default_setup);

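/*
 * Usage note (illustrative, not from the original file): the default "mem"
 * sleep state can be chosen on the kernel command line using the labels
 * defined above, e.g.:
 *
 *	mem_sleep_default=deep		(suspend-to-RAM, PM_SUSPEND_MEM)
 *	mem_sleep_default=s2idle	(suspend-to-idle, PM_SUSPEND_FREEZE)
 *
 * The current selection is reflected by mem_sleep_current and exposed through
 * the /sys/power/mem_sleep interface.
 */
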
/**
 * suspend_set_ops - Set the global suspend method table.
 * @ops: Suspend operations to use.
 */
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
	lock_system_sleep();

	suspend_ops = ops;

	if (valid_state(PM_SUSPEND_STANDBY)) {
		mem_sleep_states[PM_SUSPEND_STANDBY] = mem_sleep_labels[PM_SUSPEND_STANDBY];
		pm_states[PM_SUSPEND_STANDBY] = pm_labels[PM_SUSPEND_STANDBY];
		if (mem_sleep_default == PM_SUSPEND_STANDBY)
			mem_sleep_current = PM_SUSPEND_STANDBY;
	}
	if (valid_state(PM_SUSPEND_MEM)) {
		mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM];
		if (mem_sleep_default == PM_SUSPEND_MEM)
			mem_sleep_current = PM_SUSPEND_MEM;
	}

	unlock_system_sleep();
}
EXPORT_SYMBOL_GPL(suspend_set_ops);

/**
 * suspend_valid_only_mem - Generic memory-only valid callback.
 *
 * Platform drivers that implement mem suspend only and only need to check for
 * that in their .valid() callback can use this instead of rolling their own
 * .valid() callback.
 */
int suspend_valid_only_mem(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}
EXPORT_SYMBOL_GPL(suspend_valid_only_mem);

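/*
 * Example (illustrative sketch, not part of the original file): a platform
 * driver that only supports suspend-to-RAM can reuse suspend_valid_only_mem()
 * as its .valid() callback and register its table with suspend_set_ops().
 * The "foo_*" names below are hypothetical.
 *
 *	static int foo_suspend_enter(suspend_state_t state)
 *	{
 *		// Program the SoC sleep registers and enter PM_SUSPEND_MEM.
 *		return 0;
 *	}
 *
 *	static const struct platform_suspend_ops foo_suspend_ops = {
 *		.valid = suspend_valid_only_mem,
 *		.enter = foo_suspend_enter,
 *	};
 *
 *	// From the platform's init code:
 *	//	suspend_set_ops(&foo_suspend_ops);
 */
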
static bool sleep_state_supported(suspend_state_t state)
{
	return state == PM_SUSPEND_FREEZE || (suspend_ops && suspend_ops->enter);
}

static int platform_suspend_prepare(suspend_state_t state)
{
	return state != PM_SUSPEND_FREEZE && suspend_ops->prepare ?
		suspend_ops->prepare() : 0;
}

static int platform_suspend_prepare_late(suspend_state_t state)
{
	return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ?
		freeze_ops->prepare() : 0;
}

static int platform_suspend_prepare_noirq(suspend_state_t state)
{
	return state != PM_SUSPEND_FREEZE && suspend_ops->prepare_late ?
		suspend_ops->prepare_late() : 0;
}

static void platform_resume_noirq(suspend_state_t state)
{
	if (state != PM_SUSPEND_FREEZE && suspend_ops->wake)
		suspend_ops->wake();
}

static void platform_resume_early(suspend_state_t state)
{
	if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore)
		freeze_ops->restore();
}

static void platform_resume_finish(suspend_state_t state)
{
	if (state != PM_SUSPEND_FREEZE && suspend_ops->finish)
		suspend_ops->finish();
}

static int platform_suspend_begin(suspend_state_t state)
{
	if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin)
		return freeze_ops->begin();
	else if (suspend_ops && suspend_ops->begin)
		return suspend_ops->begin(state);
	else
		return 0;
}

static void platform_resume_end(suspend_state_t state)
{
	if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
		freeze_ops->end();
	else if (suspend_ops && suspend_ops->end)
		suspend_ops->end();
}

static void platform_recover(suspend_state_t state)
{
	if (state != PM_SUSPEND_FREEZE && suspend_ops->recover)
		suspend_ops->recover();
}

static bool platform_suspend_again(suspend_state_t state)
{
	return state != PM_SUSPEND_FREEZE && suspend_ops->suspend_again ?
		suspend_ops->suspend_again() : false;
}

#ifdef CONFIG_PM_DEBUG
static unsigned int pm_test_delay = 5;
module_param(pm_test_delay, uint, 0644);
MODULE_PARM_DESC(pm_test_delay,
		 "Number of seconds to wait before resuming from suspend test");
#endif

static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
	if (pm_test_level == level) {
		pr_info("suspend debug: Waiting for %d second(s).\n",
				pm_test_delay);
		mdelay(pm_test_delay * 1000);
		return 1;
	}
#endif /* !CONFIG_PM_DEBUG */
	return 0;
}

/**
 * suspend_prepare - Prepare for entering system sleep state.
 *
 * Common code run for every system sleep state that can be entered (except for
 * hibernation). Run suspend notifiers, allocate the "suspend" console and
 * freeze processes.
 */
static int suspend_prepare(suspend_state_t state)
{
	int error, nr_calls = 0;

	if (!sleep_state_supported(state))
		return -EPERM;

	pm_prepare_console();

	error = __pm_notifier_call_chain(PM_SUSPEND_PREPARE, -1, &nr_calls);
	if (error) {
		nr_calls--;
		goto Finish;
	}

	trace_suspend_resume(TPS("freeze_processes"), 0, true);
	error = suspend_freeze_processes();
	trace_suspend_resume(TPS("freeze_processes"), 0, false);
	if (!error)
		return 0;

	suspend_stats.failed_freeze++;
	dpm_save_failed_step(SUSPEND_FREEZE);
 Finish:
	__pm_notifier_call_chain(PM_POST_SUSPEND, nr_calls, NULL);
	pm_restore_console();
	return error;
}

/* default implementation */
void __weak arch_suspend_disable_irqs(void)
{
	local_irq_disable();
}

/* default implementation */
void __weak arch_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Returns information that the sleep state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
	int error;

	error = platform_suspend_prepare(state);
	if (error)
		goto Platform_finish;

	error = dpm_suspend_late(PMSG_SUSPEND);
	if (error) {
		pr_err("PM: late suspend of devices failed\n");
		goto Platform_finish;
	}
	error = platform_suspend_prepare_late(state);
	if (error)
		goto Devices_early_resume;

	error = dpm_suspend_noirq(PMSG_SUSPEND);
	if (error) {
		pr_err("PM: noirq suspend of devices failed\n");
		goto Platform_early_resume;
	}
	error = platform_suspend_prepare_noirq(state);
	if (error)
		goto Platform_wake;

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	/*
	 * PM_SUSPEND_FREEZE equals
	 * frozen processes + suspended devices + idle processors.
	 * Thus we should invoke freeze_enter() soon after
	 * all the devices are suspended.
	 */
	if (state == PM_SUSPEND_FREEZE) {
		s2idle_loop();
		goto Platform_early_resume;
	}

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS))
		goto Enable_cpus;

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = syscore_suspend();
	if (!error) {
		*wakeup = pm_wakeup_pending();
		if (!(suspend_test(TEST_CORE) || *wakeup)) {
			trace_suspend_resume(TPS("machine_suspend"),
				state, true);
			error = suspend_ops->enter(state);
			trace_suspend_resume(TPS("machine_suspend"),
				state, false);
			events_check_enabled = false;
		} else if (*wakeup) {
			error = -EBUSY;
		}
		syscore_resume();
	}

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	platform_resume_noirq(state);
	dpm_resume_noirq(PMSG_RESUME);

 Platform_early_resume:
	platform_resume_early(state);

 Devices_early_resume:
	dpm_resume_early(PMSG_RESUME);

 Platform_finish:
	platform_resume_finish(state);
	return error;
}

/**
 * suspend_devices_and_enter - Suspend devices and enter system sleep state.
 * @state: System sleep state to enter.
 */
int suspend_devices_and_enter(suspend_state_t state)
{
	int error;
	bool wakeup = false;

	if (!sleep_state_supported(state))
		return -ENOSYS;

	error = platform_suspend_begin(state);
	if (error)
		goto Close;

	suspend_console();
	suspend_test_start();
	error = dpm_suspend_start(PMSG_SUSPEND);
	if (error) {
		pr_err("PM: Some devices failed to suspend, or early wake event detected\n");
		goto Recover_platform;
	}
	suspend_test_finish("suspend devices");
	if (suspend_test(TEST_DEVICES))
		goto Recover_platform;

	do {
		error = suspend_enter(state, &wakeup);
	} while (!error && !wakeup && platform_suspend_again(state));

 Resume_devices:
	suspend_test_start();
	dpm_resume_end(PMSG_RESUME);
	suspend_test_finish("resume devices");
	trace_suspend_resume(TPS("resume_console"), state, true);
	resume_console();
	trace_suspend_resume(TPS("resume_console"), state, false);

 Close:
	platform_resume_end(state);
	return error;

 Recover_platform:
	platform_recover(state);
	goto Resume_devices;
}

/**
 * suspend_finish - Clean up before finishing the suspend sequence.
 *
 * Call platform code to clean up, restart processes, and free the console that
 * we've allocated. This routine is not called for hibernation.
 */
static void suspend_finish(void)
{
	suspend_thaw_processes();
	pm_notifier_call_chain(PM_POST_SUSPEND);
	pm_restore_console();
}

/**
 * enter_state - Do common work needed to enter system sleep state.
 * @state: System sleep state to enter.
 *
 * Make sure that no one else is trying to put the system into a sleep state.
 * Fail if that's not the case. Otherwise, prepare for system suspend, make the
 * system enter the given sleep state and clean up after wakeup.
 */
static int enter_state(suspend_state_t state)
{
	int error;

	trace_suspend_resume(TPS("suspend_enter"), state, true);
	if (state == PM_SUSPEND_FREEZE) {
#ifdef CONFIG_PM_DEBUG
		if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
			pr_warn("PM: Unsupported test mode for suspend to idle, please choose none/freezer/devices/platform.\n");
			return -EAGAIN;
		}
#endif
	} else if (!valid_state(state)) {
		return -EINVAL;
	}
	if (!mutex_trylock(&pm_mutex))
		return -EBUSY;

	if (state == PM_SUSPEND_FREEZE)
		freeze_begin();

#ifndef CONFIG_SUSPEND_SKIP_SYNC
	trace_suspend_resume(TPS("sync_filesystems"), 0, true);
	pr_info("PM: Syncing filesystems ... ");
	sys_sync();
	pr_cont("done.\n");
	trace_suspend_resume(TPS("sync_filesystems"), 0, false);
#endif

	pr_debug("PM: Preparing system for sleep (%s)\n", pm_states[state]);
	pm_suspend_clear_flags();
	error = suspend_prepare(state);
	if (error)
		goto Unlock;

	if (suspend_test(TEST_FREEZER))
		goto Finish;

	trace_suspend_resume(TPS("suspend_enter"), state, false);
	pr_debug("PM: Suspending system (%s)\n", pm_states[state]);
	pm_restrict_gfp_mask();
	error = suspend_devices_and_enter(state);
	pm_restore_gfp_mask();

 Finish:
	pr_debug("PM: Finishing wakeup.\n");
	suspend_finish();
 Unlock:
	mutex_unlock(&pm_mutex);
	return error;
}

/**
 * pm_suspend - Externally visible function for suspending the system.
 * @state: System sleep state to enter.
 *
 * Check if the value of @state represents one of the supported states,
 * execute enter_state() and update system suspend statistics.
 */
int pm_suspend(suspend_state_t state)
{
	int error;

	if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
		return -EINVAL;

	error = enter_state(state);
	if (error) {
		suspend_stats.fail++;
		dpm_save_failed_errno(error);
	} else {
		suspend_stats.success++;
	}
	return error;
}
EXPORT_SYMBOL(pm_suspend);
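
/*
 * Usage note (illustrative, not part of the original file): pm_suspend() is
 * normally reached by writing a state label to /sys/power/state, e.g.
 * "echo mem > /sys/power/state", which resolves "mem" through
 * mem_sleep_current and calls pm_suspend() with the resulting state.
 * An in-kernel caller would invoke it directly:
 *
 *	int error = pm_suspend(PM_SUSPEND_MEM);
 *	if (error)
 *		pr_err("PM: system suspend failed: %d\n", error);
 */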