PM / Domains: Add device stop governor function (v4)
[linux-2.6/libata-dev.git] / drivers / base / power / domain.c
blob 3af9f5a71ad509d1c3c23bd5f6dbf7a5c505dedb
1 /*
2 * drivers/base/power/domain.c - Common code related to device power domains.
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6 * This file is released under the GPLv2.
7 */
9 #include <linux/init.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_domain.h>
14 #include <linux/slab.h>
15 #include <linux/err.h>
16 #include <linux/sched.h>
17 #include <linux/suspend.h>
18 #include <linux/export.h>
20 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
21 ({ \
22 type (*__routine)(struct device *__d); \
23 type __ret = (type)0; \
25 __routine = genpd->dev_ops.callback; \
26 if (__routine) { \
27 __ret = __routine(dev); \
28 } else { \
29 __routine = dev_gpd_data(dev)->ops.callback; \
30 if (__routine) \
31 __ret = __routine(dev); \
32 } \
33 __ret; \
34 })
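GENPD_DEV_CALLBACK() is a statement-expression macro that first tries the domain-wide callback in genpd->dev_ops and, only if that is not set, falls back to the per-device callback attached through pm_genpd_add_callbacks(). As a rough illustration (not a literal preprocessor expansion, and the _open_coded name is invented), genpd_stop_dev() defined below behaves like:

static int genpd_stop_dev_open_coded(struct generic_pm_domain *genpd,
				     struct device *dev)
{
	int (*routine)(struct device *d);

	/* Prefer the callback set for the whole domain. */
	routine = genpd->dev_ops.stop;
	if (!routine)
		/* Fall back to the callback attached to this device. */
		routine = dev_gpd_data(dev)->ops.stop;

	return routine ? routine(dev) : 0;
}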
36 static LIST_HEAD(gpd_list);
37 static DEFINE_MUTEX(gpd_list_lock);
39 #ifdef CONFIG_PM
41 struct generic_pm_domain *dev_to_genpd(struct device *dev)
43 if (IS_ERR_OR_NULL(dev->pm_domain))
44 return ERR_PTR(-EINVAL);
46 return pd_to_genpd(dev->pm_domain);
49 static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
51 return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
54 static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
56 return GENPD_DEV_CALLBACK(genpd, int, start, dev);
59 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
61 return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
64 static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
66 return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
69 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
71 bool ret = false;
73 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
74 ret = !!atomic_dec_and_test(&genpd->sd_count);
76 return ret;
79 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
81 atomic_inc(&genpd->sd_count);
82 smp_mb__after_atomic_inc();
85 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
87 DEFINE_WAIT(wait);
89 mutex_lock(&genpd->lock);
91 * Wait for the domain to transition into either the active
92 * or the power off state.
94 for (;;) {
95 prepare_to_wait(&genpd->status_wait_queue, &wait,
96 TASK_UNINTERRUPTIBLE);
97 if (genpd->status == GPD_STATE_ACTIVE
98 || genpd->status == GPD_STATE_POWER_OFF)
99 break;
100 mutex_unlock(&genpd->lock);
102 schedule();
104 mutex_lock(&genpd->lock);
106 finish_wait(&genpd->status_wait_queue, &wait);
109 static void genpd_release_lock(struct generic_pm_domain *genpd)
111 mutex_unlock(&genpd->lock);
114 static void genpd_set_active(struct generic_pm_domain *genpd)
116 if (genpd->resume_count == 0)
117 genpd->status = GPD_STATE_ACTIVE;
121 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
122 * @genpd: PM domain to power up.
124 * Restore power to @genpd and all of its masters so that it is possible to
125 * resume a device belonging to it.
127 int __pm_genpd_poweron(struct generic_pm_domain *genpd)
128 __releases(&genpd->lock) __acquires(&genpd->lock)
130 struct gpd_link *link;
131 DEFINE_WAIT(wait);
132 int ret = 0;
134 /* If the domain's master is being waited for, we have to wait too. */
135 for (;;) {
136 prepare_to_wait(&genpd->status_wait_queue, &wait,
137 TASK_UNINTERRUPTIBLE);
138 if (genpd->status != GPD_STATE_WAIT_MASTER)
139 break;
140 mutex_unlock(&genpd->lock);
142 schedule();
144 mutex_lock(&genpd->lock);
146 finish_wait(&genpd->status_wait_queue, &wait);
148 if (genpd->status == GPD_STATE_ACTIVE
149 || (genpd->prepared_count > 0 && genpd->suspend_power_off))
150 return 0;
152 if (genpd->status != GPD_STATE_POWER_OFF) {
153 genpd_set_active(genpd);
154 return 0;
158 * The list is guaranteed not to change while the loop below is being
159 * executed, unless one of the masters' .power_on() callbacks fiddles
160 * with it.
162 list_for_each_entry(link, &genpd->slave_links, slave_node) {
163 genpd_sd_counter_inc(link->master);
164 genpd->status = GPD_STATE_WAIT_MASTER;
166 mutex_unlock(&genpd->lock);
168 ret = pm_genpd_poweron(link->master);
170 mutex_lock(&genpd->lock);
173 * The "wait for master" status is guaranteed not to change
174 * while the master is powering on.
176 genpd->status = GPD_STATE_POWER_OFF;
177 wake_up_all(&genpd->status_wait_queue);
178 if (ret) {
179 genpd_sd_counter_dec(link->master);
180 goto err;
184 if (genpd->power_on) {
185 ret = genpd->power_on(genpd);
186 if (ret)
187 goto err;
190 genpd_set_active(genpd);
192 return 0;
194 err:
195 list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
196 genpd_sd_counter_dec(link->master);
198 return ret;
202 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
203 * @genpd: PM domain to power up.
205 int pm_genpd_poweron(struct generic_pm_domain *genpd)
207 int ret;
209 mutex_lock(&genpd->lock);
210 ret = __pm_genpd_poweron(genpd);
211 mutex_unlock(&genpd->lock);
212 return ret;
215 #endif /* CONFIG_PM */
217 #ifdef CONFIG_PM_RUNTIME
220 * __pm_genpd_save_device - Save the pre-suspend state of a device.
221 * @pdd: Domain data of the device to save the state of.
222 * @genpd: PM domain the device belongs to.
224 static int __pm_genpd_save_device(struct pm_domain_data *pdd,
225 struct generic_pm_domain *genpd)
226 __releases(&genpd->lock) __acquires(&genpd->lock)
228 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
229 struct device *dev = pdd->dev;
230 int ret = 0;
232 if (gpd_data->need_restore)
233 return 0;
235 mutex_unlock(&genpd->lock);
237 genpd_start_dev(genpd, dev);
238 ret = genpd_save_dev(genpd, dev);
239 genpd_stop_dev(genpd, dev);
241 mutex_lock(&genpd->lock);
243 if (!ret)
244 gpd_data->need_restore = true;
246 return ret;
250 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
251 * @pdd: Domain data of the device to restore the state of.
252 * @genpd: PM domain the device belongs to.
254 static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
255 struct generic_pm_domain *genpd)
256 __releases(&genpd->lock) __acquires(&genpd->lock)
258 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
259 struct device *dev = pdd->dev;
261 if (!gpd_data->need_restore)
262 return;
264 mutex_unlock(&genpd->lock);
266 genpd_start_dev(genpd, dev);
267 genpd_restore_dev(genpd, dev);
268 genpd_stop_dev(genpd, dev);
270 mutex_lock(&genpd->lock);
272 gpd_data->need_restore = false;
276 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
277 * @genpd: PM domain to check.
279 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
280 * a "power off" operation, which means that a "power on" has occurred in the
281 * meantime, or if its resume_count field is different from zero, which means
282 * that one of its devices has been resumed in the meantime.
284 static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
286 return genpd->status == GPD_STATE_WAIT_MASTER
287 || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
291 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
292 * @genpd: PM domain to power off.
294 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
295 * before.
297 void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
299 if (!work_pending(&genpd->power_off_work))
300 queue_work(pm_wq, &genpd->power_off_work);
304 * pm_genpd_poweroff - Remove power from a given PM domain.
305 * @genpd: PM domain to power down.
307 * If all of the @genpd's devices have been suspended and all of its subdomains
308 * have been powered down, run the runtime suspend callbacks provided by all of
309 * the @genpd's devices' drivers and remove power from @genpd.
311 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
312 __releases(&genpd->lock) __acquires(&genpd->lock)
314 struct pm_domain_data *pdd;
315 struct gpd_link *link;
316 unsigned int not_suspended;
317 int ret = 0;
319 start:
321 * Do not try to power off the domain in the following situations:
322 * (1) The domain is already in the "power off" state.
323 * (2) The domain is waiting for its master to power up.
324 * (3) One of the domain's devices is being resumed right now.
325 * (4) System suspend is in progress.
327 if (genpd->status == GPD_STATE_POWER_OFF
328 || genpd->status == GPD_STATE_WAIT_MASTER
329 || genpd->resume_count > 0 || genpd->prepared_count > 0)
330 return 0;
332 if (atomic_read(&genpd->sd_count) > 0)
333 return -EBUSY;
335 not_suspended = 0;
336 list_for_each_entry(pdd, &genpd->dev_list, list_node)
337 if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
338 || pdd->dev->power.irq_safe))
339 not_suspended++;
341 if (not_suspended > genpd->in_progress)
342 return -EBUSY;
344 if (genpd->poweroff_task) {
346 * Another instance of pm_genpd_poweroff() is executing
347 * callbacks, so tell it to start over and return.
349 genpd->status = GPD_STATE_REPEAT;
350 return 0;
353 if (genpd->gov && genpd->gov->power_down_ok) {
354 if (!genpd->gov->power_down_ok(&genpd->domain))
355 return -EAGAIN;
358 genpd->status = GPD_STATE_BUSY;
359 genpd->poweroff_task = current;
361 list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
362 ret = atomic_read(&genpd->sd_count) == 0 ?
363 __pm_genpd_save_device(pdd, genpd) : -EBUSY;
365 if (genpd_abort_poweroff(genpd))
366 goto out;
368 if (ret) {
369 genpd_set_active(genpd);
370 goto out;
373 if (genpd->status == GPD_STATE_REPEAT) {
374 genpd->poweroff_task = NULL;
375 goto start;
379 if (genpd->power_off) {
380 if (atomic_read(&genpd->sd_count) > 0) {
381 ret = -EBUSY;
382 goto out;
386 * If sd_count > 0 at this point, one of the subdomains hasn't
387 * managed to call pm_genpd_poweron() for the master yet after
388 * incrementing it. In that case pm_genpd_poweron() will wait
389 * for us to drop the lock, so we can call .power_off() and let
390 * the pm_genpd_poweron() restore power for us (this shouldn't
391 * happen very often).
393 ret = genpd->power_off(genpd);
394 if (ret == -EBUSY) {
395 genpd_set_active(genpd);
396 goto out;
400 genpd->status = GPD_STATE_POWER_OFF;
402 list_for_each_entry(link, &genpd->slave_links, slave_node) {
403 genpd_sd_counter_dec(link->master);
404 genpd_queue_power_off_work(link->master);
407 out:
408 genpd->poweroff_task = NULL;
409 wake_up_all(&genpd->status_wait_queue);
410 return ret;
414 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
415 * @work: Work structure used for scheduling the execution of this function.
417 static void genpd_power_off_work_fn(struct work_struct *work)
419 struct generic_pm_domain *genpd;
421 genpd = container_of(work, struct generic_pm_domain, power_off_work);
423 genpd_acquire_lock(genpd);
424 pm_genpd_poweroff(genpd);
425 genpd_release_lock(genpd);
429 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
430 * @dev: Device to suspend.
432 * Carry out a runtime suspend of a device under the assumption that its
433 * pm_domain field points to the domain member of an object of type
434 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
436 static int pm_genpd_runtime_suspend(struct device *dev)
438 struct generic_pm_domain *genpd;
439 bool (*stop_ok)(struct device *__dev);
440 int ret;
442 dev_dbg(dev, "%s()\n", __func__);
444 genpd = dev_to_genpd(dev);
445 if (IS_ERR(genpd))
446 return -EINVAL;
448 might_sleep_if(!genpd->dev_irq_safe);
450 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
451 if (stop_ok && !stop_ok(dev))
452 return -EBUSY;
454 ret = genpd_stop_dev(genpd, dev);
455 if (ret)
456 return ret;
458 pm_runtime_update_max_time_suspended(dev,
459 dev_gpd_data(dev)->td.start_latency_ns);
462 * If power.irq_safe is set, this routine will be run with interrupts
463 * off, so it can't use mutexes.
465 if (dev->power.irq_safe)
466 return 0;
468 mutex_lock(&genpd->lock);
469 genpd->in_progress++;
470 pm_genpd_poweroff(genpd);
471 genpd->in_progress--;
472 mutex_unlock(&genpd->lock);
474 return 0;
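The stop_ok hook consulted above is the point of this patch: before stopping a device on runtime suspend, the domain governor may veto the operation, typically because the device's stop/start latencies would not fit its latency budget. The sketch below is purely illustrative; foo_latency_budget_ns() is a made-up stand-in for however the platform derives that budget, and the real policy lives in the governor code, not in this file:

static bool foo_stop_ok(struct device *dev)
{
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	s64 budget_ns = foo_latency_budget_ns(dev);	/* hypothetical helper */

	if (budget_ns < 0)
		return true;	/* no latency constraint on this device */

	/* Only stop the device if stopping and restarting it fits the budget. */
	return td->stop_latency_ns + td->start_latency_ns < budget_ns;
}

/* Plugged in via the domain governor passed to pm_genpd_init(): */
static struct dev_power_governor foo_gov = {
	.stop_ok = foo_stop_ok,
};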
478 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
479 * @dev: Device to resume.
481 * Carry out a runtime resume of a device under the assumption that its
482 * pm_domain field points to the domain member of an object of type
483 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
485 static int pm_genpd_runtime_resume(struct device *dev)
487 struct generic_pm_domain *genpd;
488 DEFINE_WAIT(wait);
489 int ret;
491 dev_dbg(dev, "%s()\n", __func__);
493 genpd = dev_to_genpd(dev);
494 if (IS_ERR(genpd))
495 return -EINVAL;
497 might_sleep_if(!genpd->dev_irq_safe);
499 /* If power.irq_safe, the PM domain is never powered off. */
500 if (dev->power.irq_safe)
501 goto out;
503 mutex_lock(&genpd->lock);
504 ret = __pm_genpd_poweron(genpd);
505 if (ret) {
506 mutex_unlock(&genpd->lock);
507 return ret;
509 genpd->status = GPD_STATE_BUSY;
510 genpd->resume_count++;
511 for (;;) {
512 prepare_to_wait(&genpd->status_wait_queue, &wait,
513 TASK_UNINTERRUPTIBLE);
515 * If current is the powering off task, we have been called
516 * reentrantly from one of the device callbacks, so we should
517 * not wait.
519 if (!genpd->poweroff_task || genpd->poweroff_task == current)
520 break;
521 mutex_unlock(&genpd->lock);
523 schedule();
525 mutex_lock(&genpd->lock);
527 finish_wait(&genpd->status_wait_queue, &wait);
528 __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
529 genpd->resume_count--;
530 genpd_set_active(genpd);
531 wake_up_all(&genpd->status_wait_queue);
532 mutex_unlock(&genpd->lock);
534 out:
535 genpd_start_dev(genpd, dev);
537 return 0;
541 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
543 void pm_genpd_poweroff_unused(void)
545 struct generic_pm_domain *genpd;
547 mutex_lock(&gpd_list_lock);
549 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
550 genpd_queue_power_off_work(genpd);
552 mutex_unlock(&gpd_list_lock);
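Platforms normally call pm_genpd_poweroff_unused() once, after drivers have had a chance to probe, for instance from a late initcall (a minimal sketch, names made up):

static int __init foo_pd_poweroff_unused(void)
{
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(foo_pd_poweroff_unused);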
555 #else
557 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
559 #define pm_genpd_runtime_suspend NULL
560 #define pm_genpd_runtime_resume NULL
562 #endif /* CONFIG_PM_RUNTIME */
564 #ifdef CONFIG_PM_SLEEP
566 static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
567 struct device *dev)
569 return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
572 static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
574 return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
577 static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
579 return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
582 static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
584 return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
587 static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
589 return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
592 static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
594 return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
597 static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
599 return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
602 static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
604 return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
607 static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
609 return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
613 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
614 * @genpd: PM domain to power off, if possible.
616 * Check if the given PM domain can be powered off (during system suspend or
617 * hibernation) and do that if so. Also, in that case propagate to its masters.
619 * This function is only called in "noirq" stages of system power transitions,
620 * so it need not acquire locks (all of the "noirq" callbacks are executed
621 * sequentially, so it is guaranteed that it will never run twice in parallel).
623 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
625 struct gpd_link *link;
627 if (genpd->status == GPD_STATE_POWER_OFF)
628 return;
630 if (genpd->suspended_count != genpd->device_count
631 || atomic_read(&genpd->sd_count) > 0)
632 return;
634 if (genpd->power_off)
635 genpd->power_off(genpd);
637 genpd->status = GPD_STATE_POWER_OFF;
639 list_for_each_entry(link, &genpd->slave_links, slave_node) {
640 genpd_sd_counter_dec(link->master);
641 pm_genpd_sync_poweroff(link->master);
646 * resume_needed - Check whether to resume a device before system suspend.
647 * @dev: Device to check.
648 * @genpd: PM domain the device belongs to.
650 * There are two cases in which a device that can wake up the system from sleep
651 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
652 * to wake up the system and it has to remain active for this purpose while the
653 * system is in the sleep state and (2) if the device is not enabled to wake up
654 * the system from sleep states and it generally doesn't generate wakeup signals
655 * by itself (those signals are generated on its behalf by other parts of the
656 * system). In the latter case it may be necessary to reconfigure the device's
657 * wakeup settings during system suspend, because it may have been set up to
658 * signal remote wakeup from the system's working state as needed by runtime PM.
659 * Return 'true' in either of the above cases.
661 static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
663 bool active_wakeup;
665 if (!device_can_wakeup(dev))
666 return false;
668 active_wakeup = genpd_dev_active_wakeup(genpd, dev);
669 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
673 * pm_genpd_prepare - Start power transition of a device in a PM domain.
674 * @dev: Device to start the transition of.
676 * Start a power transition of a device (during a system-wide power transition)
677 * under the assumption that its pm_domain field points to the domain member of
678 * an object of type struct generic_pm_domain representing a PM domain
679 * consisting of I/O devices.
681 static int pm_genpd_prepare(struct device *dev)
683 struct generic_pm_domain *genpd;
684 int ret;
686 dev_dbg(dev, "%s()\n", __func__);
688 genpd = dev_to_genpd(dev);
689 if (IS_ERR(genpd))
690 return -EINVAL;
693 * If a wakeup request is pending for the device, it should be woken up
694 * at this point and a system wakeup event should be reported if it's
695 * set up to wake up the system from sleep states.
697 pm_runtime_get_noresume(dev);
698 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
699 pm_wakeup_event(dev, 0);
701 if (pm_wakeup_pending()) {
702 pm_runtime_put_sync(dev);
703 return -EBUSY;
706 if (resume_needed(dev, genpd))
707 pm_runtime_resume(dev);
709 genpd_acquire_lock(genpd);
711 if (genpd->prepared_count++ == 0)
712 genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
714 genpd_release_lock(genpd);
716 if (genpd->suspend_power_off) {
717 pm_runtime_put_noidle(dev);
718 return 0;
722 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
723 * so pm_genpd_poweron() will return immediately, but if the device
724 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
725 * to make it operational.
727 pm_runtime_resume(dev);
728 __pm_runtime_disable(dev, false);
730 ret = pm_generic_prepare(dev);
731 if (ret) {
732 mutex_lock(&genpd->lock);
734 if (--genpd->prepared_count == 0)
735 genpd->suspend_power_off = false;
737 mutex_unlock(&genpd->lock);
738 pm_runtime_enable(dev);
741 pm_runtime_put_sync(dev);
742 return ret;
746 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
747 * @dev: Device to suspend.
749 * Suspend a device under the assumption that its pm_domain field points to the
750 * domain member of an object of type struct generic_pm_domain representing
751 * a PM domain consisting of I/O devices.
753 static int pm_genpd_suspend(struct device *dev)
755 struct generic_pm_domain *genpd;
757 dev_dbg(dev, "%s()\n", __func__);
759 genpd = dev_to_genpd(dev);
760 if (IS_ERR(genpd))
761 return -EINVAL;
763 return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
767 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
768 * @dev: Device to suspend.
770 * Carry out a late suspend of a device under the assumption that its
771 * pm_domain field points to the domain member of an object of type
772 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
774 static int pm_genpd_suspend_noirq(struct device *dev)
776 struct generic_pm_domain *genpd;
777 int ret;
779 dev_dbg(dev, "%s()\n", __func__);
781 genpd = dev_to_genpd(dev);
782 if (IS_ERR(genpd))
783 return -EINVAL;
785 if (genpd->suspend_power_off)
786 return 0;
788 ret = genpd_suspend_late(genpd, dev);
789 if (ret)
790 return ret;
792 if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
793 return 0;
795 genpd_stop_dev(genpd, dev);
798 * Since all of the "noirq" callbacks are executed sequentially, it is
799 * guaranteed that this function will never run twice in parallel for
800 * the same PM domain, so it is not necessary to use locking here.
802 genpd->suspended_count++;
803 pm_genpd_sync_poweroff(genpd);
805 return 0;
809 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
810 * @dev: Device to resume.
812 * Carry out an early resume of a device under the assumption that its
813 * pm_domain field points to the domain member of an object of type
814 * struct generic_pm_domain representing a power domain consisting of I/O
815 * devices.
817 static int pm_genpd_resume_noirq(struct device *dev)
819 struct generic_pm_domain *genpd;
821 dev_dbg(dev, "%s()\n", __func__);
823 genpd = dev_to_genpd(dev);
824 if (IS_ERR(genpd))
825 return -EINVAL;
827 if (genpd->suspend_power_off)
828 return 0;
831 * Since all of the "noirq" callbacks are executed sequentially, it is
832 * guaranteed that this function will never run twice in parallel for
833 * the same PM domain, so it is not necessary to use locking here.
835 pm_genpd_poweron(genpd);
836 genpd->suspended_count--;
837 genpd_start_dev(genpd, dev);
839 return genpd_resume_early(genpd, dev);
843 * pm_genpd_resume - Resume a device belonging to an I/O power domain.
844 * @dev: Device to resume.
846 * Resume a device under the assumption that its pm_domain field points to the
847 * domain member of an object of type struct generic_pm_domain representing
848 * a power domain consisting of I/O devices.
850 static int pm_genpd_resume(struct device *dev)
852 struct generic_pm_domain *genpd;
854 dev_dbg(dev, "%s()\n", __func__);
856 genpd = dev_to_genpd(dev);
857 if (IS_ERR(genpd))
858 return -EINVAL;
860 return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
864 * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
865 * @dev: Device to freeze.
867 * Freeze a device under the assumption that its pm_domain field points to the
868 * domain member of an object of type struct generic_pm_domain representing
869 * a power domain consisting of I/O devices.
871 static int pm_genpd_freeze(struct device *dev)
873 struct generic_pm_domain *genpd;
875 dev_dbg(dev, "%s()\n", __func__);
877 genpd = dev_to_genpd(dev);
878 if (IS_ERR(genpd))
879 return -EINVAL;
881 return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
885 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
886 * @dev: Device to freeze.
888 * Carry out a late freeze of a device under the assumption that its
889 * pm_domain field points to the domain member of an object of type
890 * struct generic_pm_domain representing a power domain consisting of I/O
891 * devices.
893 static int pm_genpd_freeze_noirq(struct device *dev)
895 struct generic_pm_domain *genpd;
896 int ret;
898 dev_dbg(dev, "%s()\n", __func__);
900 genpd = dev_to_genpd(dev);
901 if (IS_ERR(genpd))
902 return -EINVAL;
904 if (genpd->suspend_power_off)
905 return 0;
907 ret = genpd_freeze_late(genpd, dev);
908 if (ret)
909 return ret;
911 genpd_stop_dev(genpd, dev);
913 return 0;
917 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
918 * @dev: Device to thaw.
920 * Carry out an early thaw of a device under the assumption that its
921 * pm_domain field points to the domain member of an object of type
922 * struct generic_pm_domain representing a power domain consisting of I/O
923 * devices.
925 static int pm_genpd_thaw_noirq(struct device *dev)
927 struct generic_pm_domain *genpd;
929 dev_dbg(dev, "%s()\n", __func__);
931 genpd = dev_to_genpd(dev);
932 if (IS_ERR(genpd))
933 return -EINVAL;
935 if (genpd->suspend_power_off)
936 return 0;
938 genpd_start_dev(genpd, dev);
940 return genpd_thaw_early(genpd, dev);
944 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
945 * @dev: Device to thaw.
947 * Thaw a device under the assumption that its pm_domain field points to the
948 * domain member of an object of type struct generic_pm_domain representing
949 * a power domain consisting of I/O devices.
951 static int pm_genpd_thaw(struct device *dev)
953 struct generic_pm_domain *genpd;
955 dev_dbg(dev, "%s()\n", __func__);
957 genpd = dev_to_genpd(dev);
958 if (IS_ERR(genpd))
959 return -EINVAL;
961 return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
965 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
966 * @dev: Device to resume.
968 * Carry out an early restore of a device under the assumption that its
969 * pm_domain field points to the domain member of an object of type
970 * struct generic_pm_domain representing a power domain consisting of I/O
971 * devices.
973 static int pm_genpd_restore_noirq(struct device *dev)
975 struct generic_pm_domain *genpd;
977 dev_dbg(dev, "%s()\n", __func__);
979 genpd = dev_to_genpd(dev);
980 if (IS_ERR(genpd))
981 return -EINVAL;
984 * Since all of the "noirq" callbacks are executed sequentially, it is
985 * guaranteed that this function will never run twice in parallel for
986 * the same PM domain, so it is not necessary to use locking here.
988 genpd->status = GPD_STATE_POWER_OFF;
989 if (genpd->suspend_power_off) {
991 * The boot kernel might put the domain into the power on state,
992 * so make sure it really is powered off.
994 if (genpd->power_off)
995 genpd->power_off(genpd);
996 return 0;
999 pm_genpd_poweron(genpd);
1000 genpd->suspended_count--;
1001 genpd_start_dev(genpd, dev);
1003 return genpd_resume_early(genpd, dev);
1007 * pm_genpd_complete - Complete power transition of a device in a power domain.
1008 * @dev: Device to complete the transition of.
1010 * Complete a power transition of a device (during a system-wide power
1011 * transition) under the assumption that its pm_domain field points to the
1012 * domain member of an object of type struct generic_pm_domain representing
1013 * a power domain consisting of I/O devices.
1015 static void pm_genpd_complete(struct device *dev)
1017 struct generic_pm_domain *genpd;
1018 bool run_complete;
1020 dev_dbg(dev, "%s()\n", __func__);
1022 genpd = dev_to_genpd(dev);
1023 if (IS_ERR(genpd))
1024 return;
1026 mutex_lock(&genpd->lock);
1028 run_complete = !genpd->suspend_power_off;
1029 if (--genpd->prepared_count == 0)
1030 genpd->suspend_power_off = false;
1032 mutex_unlock(&genpd->lock);
1034 if (run_complete) {
1035 pm_generic_complete(dev);
1036 pm_runtime_set_active(dev);
1037 pm_runtime_enable(dev);
1038 pm_runtime_idle(dev);
1042 #else
1044 #define pm_genpd_prepare NULL
1045 #define pm_genpd_suspend NULL
1046 #define pm_genpd_suspend_noirq NULL
1047 #define pm_genpd_resume_noirq NULL
1048 #define pm_genpd_resume NULL
1049 #define pm_genpd_freeze NULL
1050 #define pm_genpd_freeze_noirq NULL
1051 #define pm_genpd_thaw_noirq NULL
1052 #define pm_genpd_thaw NULL
1053 #define pm_genpd_restore_noirq NULL
1054 #define pm_genpd_complete NULL
1056 #endif /* CONFIG_PM_SLEEP */
1059 * __pm_genpd_add_device - Add a device to an I/O PM domain.
1060 * @genpd: PM domain to add the device to.
1061 * @dev: Device to be added.
1062 * @td: Set of PM QoS timing parameters to attach to the device.
1064 int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1065 struct gpd_timing_data *td)
1067 struct generic_pm_domain_data *gpd_data;
1068 struct pm_domain_data *pdd;
1069 int ret = 0;
1071 dev_dbg(dev, "%s()\n", __func__);
1073 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1074 return -EINVAL;
1076 genpd_acquire_lock(genpd);
1078 if (genpd->status == GPD_STATE_POWER_OFF) {
1079 ret = -EINVAL;
1080 goto out;
1083 if (genpd->prepared_count > 0) {
1084 ret = -EAGAIN;
1085 goto out;
1088 list_for_each_entry(pdd, &genpd->dev_list, list_node)
1089 if (pdd->dev == dev) {
1090 ret = -EINVAL;
1091 goto out;
1094 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1095 if (!gpd_data) {
1096 ret = -ENOMEM;
1097 goto out;
1100 genpd->device_count++;
1102 dev->pm_domain = &genpd->domain;
1103 dev_pm_get_subsys_data(dev);
1104 dev->power.subsys_data->domain_data = &gpd_data->base;
1105 gpd_data->base.dev = dev;
1106 gpd_data->need_restore = false;
1107 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1108 if (td)
1109 gpd_data->td = *td;
1111 out:
1112 genpd_release_lock(genpd);
1114 return ret;
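The optional @td argument seeds the per-device timing data that the stop governor reads (cf. td.start_latency_ns in pm_genpd_runtime_suspend() above); it is copied into the domain data, so a caller may pass a local or static structure. Illustrative values and names only:

static struct gpd_timing_data foo_td = {
	.stop_latency_ns  = 20000,	/* worst-case genpd_stop_dev() time */
	.start_latency_ns = 50000,	/* worst-case genpd_start_dev() time */
};

ret = __pm_genpd_add_device(&foo_pd, &pdev->dev, &foo_td);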
1118 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1119 * @genpd: PM domain to remove the device from.
1120 * @dev: Device to be removed.
1122 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1123 struct device *dev)
1125 struct pm_domain_data *pdd;
1126 int ret = -EINVAL;
1128 dev_dbg(dev, "%s()\n", __func__);
1130 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1131 return -EINVAL;
1133 genpd_acquire_lock(genpd);
1135 if (genpd->prepared_count > 0) {
1136 ret = -EAGAIN;
1137 goto out;
1140 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
1141 if (pdd->dev != dev)
1142 continue;
1144 list_del_init(&pdd->list_node);
1145 pdd->dev = NULL;
1146 dev_pm_put_subsys_data(dev);
1147 dev->pm_domain = NULL;
1148 kfree(to_gpd_data(pdd));
1150 genpd->device_count--;
1152 ret = 0;
1153 break;
1156 out:
1157 genpd_release_lock(genpd);
1159 return ret;
1163 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1164 * @genpd: Master PM domain to add the subdomain to.
1165 * @subdomain: Subdomain to be added.
1167 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1168 struct generic_pm_domain *subdomain)
1170 struct gpd_link *link;
1171 int ret = 0;
1173 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1174 return -EINVAL;
1176 start:
1177 genpd_acquire_lock(genpd);
1178 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1180 if (subdomain->status != GPD_STATE_POWER_OFF
1181 && subdomain->status != GPD_STATE_ACTIVE) {
1182 mutex_unlock(&subdomain->lock);
1183 genpd_release_lock(genpd);
1184 goto start;
1187 if (genpd->status == GPD_STATE_POWER_OFF
1188 && subdomain->status != GPD_STATE_POWER_OFF) {
1189 ret = -EINVAL;
1190 goto out;
1193 list_for_each_entry(link, &genpd->slave_links, slave_node) {
1194 if (link->slave == subdomain && link->master == genpd) {
1195 ret = -EINVAL;
1196 goto out;
1200 link = kzalloc(sizeof(*link), GFP_KERNEL);
1201 if (!link) {
1202 ret = -ENOMEM;
1203 goto out;
1205 link->master = genpd;
1206 list_add_tail(&link->master_node, &genpd->master_links);
1207 link->slave = subdomain;
1208 list_add_tail(&link->slave_node, &subdomain->slave_links);
1209 if (subdomain->status != GPD_STATE_POWER_OFF)
1210 genpd_sd_counter_inc(genpd);
1212 out:
1213 mutex_unlock(&subdomain->lock);
1214 genpd_release_lock(genpd);
1216 return ret;
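Subdomain links are how a domain hierarchy is built: as long as the subdomain is powered, it holds a reference (sd_count) on its master, preventing the master from being powered off. For example, a platform with a hypothetical domain A4 nested inside A3 would set this up as:

pm_genpd_init(&a3_pd, NULL, true);
pm_genpd_init(&a4_pd, NULL, true);
ret = pm_genpd_add_subdomain(&a3_pd, &a4_pd);	/* a4_pd becomes a subdomain of a3_pd */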
1220 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1221 * @genpd: Master PM domain to remove the subdomain from.
1222 * @subdomain: Subdomain to be removed.
1224 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1225 struct generic_pm_domain *subdomain)
1227 struct gpd_link *link;
1228 int ret = -EINVAL;
1230 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1231 return -EINVAL;
1233 start:
1234 genpd_acquire_lock(genpd);
1236 list_for_each_entry(link, &genpd->master_links, master_node) {
1237 if (link->slave != subdomain)
1238 continue;
1240 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1242 if (subdomain->status != GPD_STATE_POWER_OFF
1243 && subdomain->status != GPD_STATE_ACTIVE) {
1244 mutex_unlock(&subdomain->lock);
1245 genpd_release_lock(genpd);
1246 goto start;
1249 list_del(&link->master_node);
1250 list_del(&link->slave_node);
1251 kfree(link);
1252 if (subdomain->status != GPD_STATE_POWER_OFF)
1253 genpd_sd_counter_dec(genpd);
1255 mutex_unlock(&subdomain->lock);
1257 ret = 0;
1258 break;
1261 genpd_release_lock(genpd);
1263 return ret;
1267 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
1268 * @dev: Device to add the callbacks to.
1269 * @ops: Set of callbacks to add.
1270 * @td: Timing data to add to the device along with the callbacks (optional).
1272 int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
1273 struct gpd_timing_data *td)
1275 struct pm_domain_data *pdd;
1276 int ret = 0;
1278 if (!(dev && dev->power.subsys_data && ops))
1279 return -EINVAL;
1281 pm_runtime_disable(dev);
1282 device_pm_lock();
1284 pdd = dev->power.subsys_data->domain_data;
1285 if (pdd) {
1286 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1288 gpd_data->ops = *ops;
1289 if (td)
1290 gpd_data->td = *td;
1291 } else {
1292 ret = -EINVAL;
1295 device_pm_unlock();
1296 pm_runtime_enable(dev);
1298 return ret;
1300 EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
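A driver or platform that needs device-specific behaviour instead of the domain-wide dev_ops can attach its own struct gpd_dev_ops this way; only the callbacks that are set are used, the others fall back as described for GENPD_DEV_CALLBACK(). A hypothetical example:

static int foo_dev_save_state(struct device *dev)
{
	/* Save the device's registers before its domain is powered off. */
	return 0;
}

static int foo_dev_restore_state(struct device *dev)
{
	/* Restore the registers after the domain comes back up. */
	return 0;
}

static struct gpd_dev_ops foo_dev_ops = {
	.save_state	= foo_dev_save_state,
	.restore_state	= foo_dev_restore_state,
};

ret = pm_genpd_add_callbacks(dev, &foo_dev_ops, NULL);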
1303 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
1304 * @dev: Device to remove the callbacks from.
1305 * @clear_td: If set, clear the device's timing data too.
1307 int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
1309 struct pm_domain_data *pdd;
1310 int ret = 0;
1312 if (!(dev && dev->power.subsys_data))
1313 return -EINVAL;
1315 pm_runtime_disable(dev);
1316 device_pm_lock();
1318 pdd = dev->power.subsys_data->domain_data;
1319 if (pdd) {
1320 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1322 gpd_data->ops = (struct gpd_dev_ops){ 0 };
1323 if (clear_td)
1324 gpd_data->td = (struct gpd_timing_data){ 0 };
1325 } else {
1326 ret = -EINVAL;
1329 device_pm_unlock();
1330 pm_runtime_enable(dev);
1332 return ret;
1334 EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1336 /* Default device callbacks for generic PM domains. */
1339 * pm_genpd_default_save_state - Default "save device state" for PM domains.
1340 * @dev: Device to handle.
1342 static int pm_genpd_default_save_state(struct device *dev)
1344 int (*cb)(struct device *__dev);
1345 struct device_driver *drv = dev->driver;
1347 cb = dev_gpd_data(dev)->ops.save_state;
1348 if (cb)
1349 return cb(dev);
1351 if (drv && drv->pm && drv->pm->runtime_suspend)
1352 return drv->pm->runtime_suspend(dev);
1354 return 0;
1358 * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
1359 * @dev: Device to handle.
1361 static int pm_genpd_default_restore_state(struct device *dev)
1363 int (*cb)(struct device *__dev);
1364 struct device_driver *drv = dev->driver;
1366 cb = dev_gpd_data(dev)->ops.restore_state;
1367 if (cb)
1368 return cb(dev);
1370 if (drv && drv->pm && drv->pm->runtime_resume)
1371 return drv->pm->runtime_resume(dev);
1373 return 0;
1377 * pm_genpd_default_suspend - Default "device suspend" for PM domains.
1378 * @dev: Device to handle.
1380 static int pm_genpd_default_suspend(struct device *dev)
1382 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
1384 return cb ? cb(dev) : pm_generic_suspend(dev);
1388 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
1389 * @dev: Device to handle.
1391 static int pm_genpd_default_suspend_late(struct device *dev)
1393 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
1395 return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
1399 * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
1400 * @dev: Device to handle.
1402 static int pm_genpd_default_resume_early(struct device *dev)
1404 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
1406 return cb ? cb(dev) : pm_generic_resume_noirq(dev);
1410 * pm_genpd_default_resume - Default "device resume" for PM domains.
1411 * @dev: Device to handle.
1413 static int pm_genpd_default_resume(struct device *dev)
1415 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
1417 return cb ? cb(dev) : pm_generic_resume(dev);
1421 * pm_genpd_default_freeze - Default "device freeze" for PM domains.
1422 * @dev: Device to handle.
1424 static int pm_genpd_default_freeze(struct device *dev)
1426 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
1428 return cb ? cb(dev) : pm_generic_freeze(dev);
1432 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
1433 * @dev: Device to handle.
1435 static int pm_genpd_default_freeze_late(struct device *dev)
1437 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
1439 return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
1443 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
1444 * @dev: Device to handle.
1446 static int pm_genpd_default_thaw_early(struct device *dev)
1448 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
1450 return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
1454 * pm_genpd_default_thaw - Default "device thaw" for PM domains.
1455 * @dev: Device to handle.
1457 static int pm_genpd_default_thaw(struct device *dev)
1459 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
1461 return cb ? cb(dev) : pm_generic_thaw(dev);
1465 * pm_genpd_init - Initialize a generic I/O PM domain object.
1466 * @genpd: PM domain object to initialize.
1467 * @gov: PM domain governor to associate with the domain (may be NULL).
1468 * @is_off: Initial power state of the domain (true if it starts powered off).
1470 void pm_genpd_init(struct generic_pm_domain *genpd,
1471 struct dev_power_governor *gov, bool is_off)
1473 if (IS_ERR_OR_NULL(genpd))
1474 return;
1476 INIT_LIST_HEAD(&genpd->master_links);
1477 INIT_LIST_HEAD(&genpd->slave_links);
1478 INIT_LIST_HEAD(&genpd->dev_list);
1479 mutex_init(&genpd->lock);
1480 genpd->gov = gov;
1481 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1482 genpd->in_progress = 0;
1483 atomic_set(&genpd->sd_count, 0);
1484 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1485 init_waitqueue_head(&genpd->status_wait_queue);
1486 genpd->poweroff_task = NULL;
1487 genpd->resume_count = 0;
1488 genpd->device_count = 0;
1489 genpd->suspended_count = 0;
1490 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1491 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1492 genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
1493 genpd->domain.ops.prepare = pm_genpd_prepare;
1494 genpd->domain.ops.suspend = pm_genpd_suspend;
1495 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1496 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1497 genpd->domain.ops.resume = pm_genpd_resume;
1498 genpd->domain.ops.freeze = pm_genpd_freeze;
1499 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1500 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1501 genpd->domain.ops.thaw = pm_genpd_thaw;
1502 genpd->domain.ops.poweroff = pm_genpd_suspend;
1503 genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1504 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1505 genpd->domain.ops.restore = pm_genpd_resume;
1506 genpd->domain.ops.complete = pm_genpd_complete;
1507 genpd->dev_ops.save_state = pm_genpd_default_save_state;
1508 genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
1509 genpd->dev_ops.suspend = pm_genpd_default_suspend;
1510 genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
1511 genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
1512 genpd->dev_ops.resume = pm_genpd_default_resume;
1513 genpd->dev_ops.freeze = pm_genpd_default_freeze;
1514 genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
1515 genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
1516 genpd->dev_ops.thaw = pm_genpd_default_thaw;
1517 mutex_lock(&gpd_list_lock);
1518 list_add(&genpd->gpd_list_node, &gpd_list);
1519 mutex_unlock(&gpd_list_lock);
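Taken together, platform code typically defines the domain with its power_on()/power_off() handlers, initializes it, and then attaches devices. A rough sketch with made-up names (the governor argument may be NULL when no power_down_ok/stop_ok policy is wanted):

static int foo_pd_power_off(struct generic_pm_domain *domain)
{
	/* Gate clocks / cut power to the domain. */
	return 0;
}

static int foo_pd_power_on(struct generic_pm_domain *domain)
{
	/* Restore power to the domain. */
	return 0;
}

static struct generic_pm_domain foo_pd = {
	.power_off	= foo_pd_power_off,
	.power_on	= foo_pd_power_on,
};

/* At platform init time: */
pm_genpd_init(&foo_pd, NULL, true);	/* domain starts powered off */
ret = __pm_genpd_add_device(&foo_pd, &pdev->dev, NULL);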