/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"
/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */
LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;
/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
}
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
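
/*
 * Illustration only (not part of the original file): a driver opts a device
 * in to asynchronous suspend/resume -- making other devices synchronize with
 * it via dpm_wait()/dpm_wait_for_children() -- by calling
 * device_enable_async_suspend() at probe time.  A minimal sketch, with a
 * hypothetical foo_probe():
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		device_enable_async_suspend(dev);
 *		return 0;
 *	}
 */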
/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
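
/*
 * Illustration only (not part of the original file): the @ops table that
 * pm_op() dispatches through is supplied by a bus type, device class, device
 * type, or power domain.  A minimal sketch of such a table for the
 * suspend/resume cases, with hypothetical foo_suspend()/foo_resume()
 * callbacks (reduced to stubs; a real driver would quiesce and reinitialize
 * its hardware in them):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend	= foo_suspend,
 *		.resume		= foo_resume,
 *	};
 */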
/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}
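
/*
 * Illustration only (not part of the original file): the *_noirq callbacks
 * invoked above run after suspend_device_irqs() has suppressed delivery of
 * device interrupts, so they must not wait on their own interrupt handler.
 * They live in the same dev_pm_ops table as the ordinary callbacks, e.g. a
 * hypothetical stub:
 *
 *	static int foo_suspend_noirq(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend_noirq	= foo_suspend_noirq,
 *	};
 */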
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
/*------------------------- Resume routines -------------------------*/
/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}
/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error)
			pm_dev_err(dev, state, " early", error);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
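
/*
 * Illustration only (not part of the original file): on wakeup the system
 * sleep core is expected to call dpm_resume_noirq() first, re-enabling
 * device interrupts, and only then run the full resume path:
 *
 *	dpm_resume_noirq(PMSG_RESUME);
 *	dpm_resume_end(PMSG_RESUME);
 */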
/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		pm_op(dev, &dev->pwr_domain->ops, state);
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

 End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error)
				pm_dev_err(dev, state, "", error);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->pwr_domain && dev->pwr_domain->ops.complete) {
		pm_dev_dbg(dev, state, "completing power domain ");
		dev->pwr_domain->ops.complete(dev);
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "completing type ");
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "completing ");
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error;

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			return error;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			return error;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			return error;
	}

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "LATE power domain ");
		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
	}

	return 0;
}
/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
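
/*
 * Illustration only (not part of the original file): dpm_suspend_noirq() is
 * the last device-level step before the system enters a sleep state, giving
 * a suspend-side sequence roughly like (error handling omitted):
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	error = dpm_suspend_noirq(PMSG_SUSPEND);
 *	(enter the sleep state)
 */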
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);
	device_lock(dev);

	if (async_error)
		goto End;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto Domain;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto Domain;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto Domain;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 Domain:
	if (!error && dev->pwr_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		pm_op(dev, &dev->pwr_domain->ops, state);
	}

 End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error)
		async_error = error;

	return error;
}
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}
static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of
 * the device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->pwr_domain && dev->pwr_domain->ops.prepare) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		dev->pwr_domain->ops.prepare(dev);
	}

 End:
	device_unlock(dev);

	return error;
}
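
/*
 * Illustration only (not part of the original file): a ->prepare() callback
 * runs before the suspend callbacks proper and may veto the transition by
 * returning an error.  A hypothetical sketch, where foo_device_is_busy() is
 * an assumed driver helper:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return foo_device_is_busy(dev) ? -EBUSY : 0;
 *	}
 */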
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
			pm_wakeup_event(dev, 0);

		pm_runtime_put_sync(dev);
		error = pm_wakeup_pending() ?
				-EBUSY : device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute
 * "suspend" callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
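
/*
 * Callers normally reach __suspend_report_result() through the
 * suspend_report_result() macro from <linux/pm.h>, which supplies the
 * calling function's name, as in the pattern used throughout this file:
 *
 *	error = ops->suspend(dev);
 *	suspend_report_result(ops->suspend, error);
 */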
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
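
/*
 * Illustration only (not part of the original file): a driver whose device
 * shares state with an unrelated device can serialize with it during
 * asynchronous transitions.  A hypothetical foo_resume(), where "companion"
 * is an assumed pointer to the other device and foo_reinit_shared_state()
 * an assumed driver helper:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		int error = device_pm_wait_for_dev(dev, companion);
 *
 *		if (error)
 *			return error;
 *		return foo_reinit_shared_state(dev);
 *	}
 */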