/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>

#include "../base.h"
#include "power.h"
/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices. Protected by dpm_list_mtx.
 */
static bool transition_started;
/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
        dev->power.status = DPM_ON;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup_count = 0;
}
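/*
 * Note: power.completion starts out completed on purpose, so that a
 * dpm_wait() on a device that has not yet been touched by any PM
 * transition returns immediately instead of blocking; the completion is
 * re-armed (INIT_COMPLETION) right before each suspend or resume below.
 */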
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}
/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus",
                 kobject_name(&dev->kobj));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent) {
                if (dev->parent->power.status >= DPM_SUSPENDING)
                        dev_warn(dev, "parent %s should not be sleeping\n",
                                 dev_name(dev->parent));
        } else if (transition_started) {
                /*
                 * We refuse to register parentless devices while a PM
                 * transition is in progress in order to avoid leaving them
                 * unhandled down the road
                 */
                dev_WARN(dev, "Parentless device registered during a PM transition\n");
        }

        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus",
                 kobject_name(&dev->kobj));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        pm_runtime_remove(dev);
}
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus",
                 kobject_name(&deva->kobj),
                 devb->bus ? devb->bus->name : "No Bus",
                 kobject_name(&devb->kobj));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus",
                 kobject_name(&deva->kobj),
                 devb->bus ? devb->bus->name : "No Bus",
                 kobject_name(&devb->kobj));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus",
                 kobject_name(&dev->kobj));
        list_move_tail(&dev->power.entry, &dpm_list);
}
static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (initcall_debug) {
                pr_info("calling %s+ @ %i\n",
                        dev_name(dev), task_pid_nr(current));
                calltime = ktime_get();
        }

        return calltime;
}
static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}
static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}
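/*
 * Together, dpm_wait() and dpm_wait_for_children() enforce ordering for
 * asynchronously handled devices: on resume a device waits for its parent
 * (see device_resume()), and on suspend it waits for all of its children
 * (see __device_suspend()), so the dpm_list ordering is never violated.
 */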
/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
                 const struct dev_pm_ops *ops,
                 pm_message_t state)
{
        int error = 0;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                if (ops->suspend) {
                        error = ops->suspend(dev);
                        suspend_report_result(ops->suspend, error);
                }
                break;
        case PM_EVENT_RESUME:
                if (ops->resume) {
                        error = ops->resume(dev);
                        suspend_report_result(ops->resume, error);
                }
                break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze) {
                        error = ops->freeze(dev);
                        suspend_report_result(ops->freeze, error);
                }
                break;
        case PM_EVENT_HIBERNATE:
                if (ops->poweroff) {
                        error = ops->poweroff(dev);
                        suspend_report_result(ops->poweroff, error);
                }
                break;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                if (ops->thaw) {
                        error = ops->thaw(dev);
                        suspend_report_result(ops->thaw, error);
                }
                break;
        case PM_EVENT_RESTORE:
                if (ops->restore) {
                        error = ops->restore(dev);
                        suspend_report_result(ops->restore, error);
                }
                break;
#endif /* CONFIG_HIBERNATION */
        default:
                error = -EINVAL;
        }

        initcall_debug_report(dev, calltime, error);

        return error;
}
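/*
 * For illustration only (not part of the original file): pm_op() is
 * typically handed a subsystem's dev_pm_ops table such as the sketch
 * below, so that e.g. PM_EVENT_SUSPEND is routed to .suspend and
 * PM_EVENT_RESUME to .resume.  The foo_* callbacks are hypothetical.
 *
 *      static const struct dev_pm_ops foo_pm_ops = {
 *              .suspend        = foo_suspend,
 *              .resume         = foo_resume,
 *              .freeze         = foo_freeze,
 *              .thaw           = foo_thaw,
 *              .poweroff       = foo_poweroff,
 *              .restore        = foo_restore,
 *      };
 */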
/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
                       const struct dev_pm_ops *ops,
                       pm_message_t state)
{
        int error = 0;
        ktime_t calltime, delta, rettime;

        if (initcall_debug) {
                pr_info("calling %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                if (ops->suspend_noirq) {
                        error = ops->suspend_noirq(dev);
                        suspend_report_result(ops->suspend_noirq, error);
                }
                break;
        case PM_EVENT_RESUME:
                if (ops->resume_noirq) {
                        error = ops->resume_noirq(dev);
                        suspend_report_result(ops->resume_noirq, error);
                }
                break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze_noirq) {
                        error = ops->freeze_noirq(dev);
                        suspend_report_result(ops->freeze_noirq, error);
                }
                break;
        case PM_EVENT_HIBERNATE:
                if (ops->poweroff_noirq) {
                        error = ops->poweroff_noirq(dev);
                        suspend_report_result(ops->poweroff_noirq, error);
                }
                break;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                if (ops->thaw_noirq) {
                        error = ops->thaw_noirq(dev);
                        suspend_report_result(ops->thaw_noirq, error);
                }
                break;
        case PM_EVENT_RESTORE:
                if (ops->restore_noirq) {
                        error = ops->restore_noirq(dev);
                        suspend_report_result(ops->restore_noirq, error);
                }
                break;
#endif /* CONFIG_HIBERNATION */
        default:
                error = -EINVAL;
        }

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                printk("initcall %s_i+ returned %d after %Ld usecs\n",
                       dev_name(dev), error,
                       (unsigned long long)ktime_to_ns(delta) >> 10);
        }

        return error;
}
static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                       int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
               kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        int error = 0;

        if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "EARLY ");
                error = pm_noirq_op(dev, dev->bus->pm, state);
                if (error)
                        goto End;
        }

        if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "EARLY type ");
                error = pm_noirq_op(dev, dev->type->pm, state);
                if (error)
                        goto End;
        }

        if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "EARLY class ");
                error = pm_noirq_op(dev, dev->class->pm, state);
        }

End:
        return error;
}
/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        transition_started = false;
        list_for_each_entry(dev, &dpm_list, power.entry)
                if (dev->power.status > DPM_OFF) {
                        int error;

                        dev->power.status = DPM_OFF;
                        error = device_resume_noirq(dev, state);
                        if (error)
                                pm_dev_err(dev, state, " early", error);
                }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
        resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        int error = 0;

        dpm_wait(dev->parent, async);

        dev->power.status = DPM_RESUMING;

        if (dev->bus) {
                if (dev->bus->pm) {
                        pm_dev_dbg(dev, state, "");
                        error = pm_op(dev, dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = legacy_resume(dev, dev->bus->resume);
                }
                if (error)
                        goto End;
        }

        if (dev->type) {
                if (dev->type->pm) {
                        pm_dev_dbg(dev, state, "type ");
                        error = pm_op(dev, dev->type->pm, state);
                }
                if (error)
                        goto End;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                        error = pm_op(dev, dev->class->pm, state);
                } else if (dev->class->resume) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_resume(dev, dev->class->resume);
                }
        }

End:
        complete_all(&dev->power.completion);

        return error;
}
static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}
static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}
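/*
 * Illustrative, and an assumption about the surrounding kernel API rather
 * than something defined in this file: a driver opts a device into the
 * asynchronous path checked by is_async() by setting power.async_suspend,
 * normally via the device_enable_async_suspend() helper, e.g. from its
 * probe() routine:
 *
 *      device_enable_async_suspend(dev);
 */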
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
        struct list_head list;
        struct device *dev;
        ktime_t starttime = ktime_get();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;

        list_for_each_entry(dev, &dpm_list, power.entry) {
                if (dev->power.status < DPM_OFF)
                        continue;

                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_list)) {
                dev = to_device(dpm_list.next);
                get_device(dev);
                if (dev->power.status >= DPM_OFF && !is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);

                        mutex_lock(&dpm_list_mtx);
                        if (error)
                                pm_dev_err(dev, state, "", error);
                } else if (dev->power.status == DPM_SUSPENDING) {
                        /* Allow new children of the device to be registered */
                        dev->power.status = DPM_RESUMING;
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &list);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}
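/*
 * Note the two-phase structure above: completions are first re-armed and
 * async-capable devices are scheduled, then the synchronous walk handles
 * the remaining devices; dpm_wait() on the parent keeps the asynchronous
 * callbacks correctly ordered with respect to the list.
 */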
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        if (dev->class && dev->class->pm && dev->class->pm->complete) {
                pm_dev_dbg(dev, state, "completing class ");
                dev->class->pm->complete(dev);
        }

        if (dev->type && dev->type->pm && dev->type->pm->complete) {
                pm_dev_dbg(dev, state, "completing type ");
                dev->type->pm->complete(dev);
        }

        if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
                pm_dev_dbg(dev, state, "completing ");
                dev->bus->pm->complete(dev);
        }
}
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
        struct list_head list;

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        transition_started = false;
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.prev);

                get_device(dev);
                if (dev->power.status > DPM_ON) {
                        dev->power.status = DPM_ON;
                        mutex_unlock(&dpm_list_mtx);

                        device_complete(dev, state);
                        pm_runtime_put_sync(dev);

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &list);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}
/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        int error = 0;

        if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "LATE class ");
                error = pm_noirq_op(dev, dev->class->pm, state);
                if (error)
                        goto End;
        }

        if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "LATE type ");
                error = pm_noirq_op(dev, dev->type->pm, state);
                if (error)
                        goto End;
        }

        if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "LATE ");
                error = pm_noirq_op(dev, dev->bus->pm, state);
        }

End:
        return error;
}
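/*
 * Note the callback ordering: the suspend side runs class, then type, then
 * bus callbacks, the exact reverse of the bus -> type -> class order used
 * by device_resume_noirq() and device_resume() above, so subsystems are
 * torn down in the opposite order to how they were brought up.
 */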
/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();
        int error = 0;

        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
                error = device_suspend_noirq(dev, state);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        break;
                }
                dev->power.status = DPM_OFF_IRQ;
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}
static int async_error;
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        int error = 0;

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto End;

        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                        error = pm_op(dev, dev->class->pm, state);
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                }
                if (error)
                        goto End;
        }

        if (dev->type) {
                if (dev->type->pm) {
                        pm_dev_dbg(dev, state, "type ");
                        error = pm_op(dev, dev->type->pm, state);
                }
                if (error)
                        goto End;
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        pm_dev_dbg(dev, state, "");
                        error = pm_op(dev, dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                }
        }

        if (!error)
                dev->power.status = DPM_OFF;

End:
        complete_all(&dev->power.completion);

        return error;
}
static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                pm_dev_err(dev, pm_transition, " async", error);
                async_error = error;
        }

        put_device(dev);
}
static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
        struct list_head list;
        ktime_t starttime = ktime_get();
        int error = 0;

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &list);
                put_device(dev);
                if (async_error)
                        break;
        }
        list_splice(&list, dpm_list.prev);
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (!error)
                dpm_show_time(starttime, state, NULL);
        return error;
}
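/*
 * dpm_suspend() consumes dpm_list from the tail (dpm_list.prev), while the
 * resume paths walk it from the head, so children, which are appended
 * after their parents on discovery, are always suspended first and
 * resumed last.
 */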
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int error = 0;

        if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
                pm_dev_dbg(dev, state, "preparing ");
                error = dev->bus->pm->prepare(dev);
                suspend_report_result(dev->bus->pm->prepare, error);
                if (error)
                        goto End;
        }

        if (dev->type && dev->type->pm && dev->type->pm->prepare) {
                pm_dev_dbg(dev, state, "preparing type ");
                error = dev->type->pm->prepare(dev);
                suspend_report_result(dev->type->pm->prepare, error);
                if (error)
                        goto End;
        }

        if (dev->class && dev->class->pm && dev->class->pm->prepare) {
                pm_dev_dbg(dev, state, "preparing class ");
                error = dev->class->pm->prepare(dev);
                suspend_report_result(dev->class->pm->prepare, error);
        }

End:
        return error;
}
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
        struct list_head list;
        int error = 0;

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        transition_started = true;
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                dev->power.status = DPM_PREPARING;
                mutex_unlock(&dpm_list_mtx);

                pm_runtime_get_noresume(dev);
                if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
                        /* Wake-up requested during system sleep transition. */
                        pm_runtime_put_sync(dev);
                        error = -EBUSY;
                } else {
                        error = device_prepare(dev, state);
                }

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        dev->power.status = DPM_ON;
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_ERR "PM: Failed to prepare device %s "
                                "for power transition: error %d\n",
                                kobject_name(&dev->kobj), error);
                        put_device(dev);
                        break;
                }
                dev->power.status = DPM_SUSPENDING;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &list);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
        return error;
}
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (!error)
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
void device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
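/*
 * A rough sketch of how the entry points above fit together during one
 * system sleep cycle. The caller shown here is hypothetical; the real
 * sequencing lives in the platform suspend core outside this file:
 *
 *      error = dpm_suspend_start(PMSG_SUSPEND);         -- prepare + suspend
 *      if (!error) {
 *              error = dpm_suspend_noirq(PMSG_SUSPEND); -- "late" phase
 *              ...enter the sleep state...
 *              dpm_resume_noirq(PMSG_RESUME);           -- "early" phase
 *      }
 *      dpm_resume_end(PMSG_RESUME);                     -- resume + complete
 */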