arch/arm/mach-omap2/pm34xx.c
/*
 * OMAP3 Power Management Routines
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 * Jouni Hogander
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap1
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/platform_data/gpio-omap.h>

#include <trace/events/power.h>

#include <asm/fncpy.h>
#include <asm/suspend.h>
#include <asm/system_misc.h>

#include "clockdomain.h"
#include "powerdomain.h"
#include <plat/prcm.h>
#include <plat-omap/dma-omap.h>

#include "soc.h"
#include "common.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-34xx.h"
#include "gpmc.h"
#include "prm-regbits-34xx.h"

#include "prm2xxx_3xxx.h"
#include "pm.h"
#include "sdrc.h"
#include "sram.h"
#include "control.h"

/* pm34xx errata defined in pm.h */
u16 pm34xx_errata;

struct power_state {
        struct powerdomain *pwrdm;
        u32 next_state;
#ifdef CONFIG_SUSPEND
        u32 saved_state;
#endif
        struct list_head node;
};

static LIST_HEAD(pwrst_list);

static int (*_omap_save_secure_sram)(u32 *addr);
void (*omap3_do_wfi_sram)(void);

static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;

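/*
 * Save the CORE powerdomain context (padconf, interrupt controller,
 * GPMC, system control module and system DMA) so it can be restored
 * after a CORE off transition.
 */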
static void omap3_core_save_context(void)
{
        omap3_ctrl_save_padconf();

        /*
         * Force write last pad into memory, as this can fail in some
         * cases according to errata 1.157, 1.185
         */
        omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
                         OMAP343X_CONTROL_MEM_WKUP + 0x2a0);

        /* Save the Interrupt controller context */
        omap_intc_save_context();
        /* Save the GPMC context */
        omap3_gpmc_save_context();
        /* Save the system control module context, padconf already saved above */
        omap3_control_save_context();
        omap_dma_global_context_save();
}

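/*
 * Restore the CORE powerdomain context saved by omap3_core_save_context();
 * the padconf registers themselves are restored by hardware.
 */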
static void omap3_core_restore_context(void)
{
        /* Restore the control module context, padconf restored by h/w */
        omap3_control_restore_context();
        /* Restore the GPMC context */
        omap3_gpmc_restore_context();
        /* Restore the interrupt controller context */
        omap_intc_restore_context();
        omap_dma_global_context_restore();
}

/*
 * FIXME: This function should be called before entering off-mode after
 * OMAP3 secure services have been accessed. Currently it is only called
 * once during boot sequence, but this works as we are not using secure
 * services.
 */
static void omap3_save_secure_ram_context(void)
{
        u32 ret;
        int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);

        if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
                /*
                 * MPU next state must be set to POWER_ON temporarily,
                 * otherwise the WFI executed inside the ROM code
                 * will hang the system.
                 */
                pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
                ret = _omap_save_secure_sram((u32 *)
                                __pa(omap3_secure_ram_storage));
                pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
                /* Following is for error tracking, it should not happen */
                if (ret) {
                        pr_err("save_secure_sram() returns %08x\n", ret);
                        while (1)
                                cpu_relax();
                }
        }
}

/*
 * PRCM Interrupt Handler Helper Function
 *
 * The purpose of this function is to clear any wake-up events latched
 * in the PRCM PM_WKST_x registers. It is possible that a wake-up event
 * may occur whilst attempting to clear a PM_WKST_x register and thus
 * set another bit in this register. A while loop is used to ensure
 * that any peripheral wake-up events occurring while attempting to
 * clear the PM_WKST_x are detected and cleared.
 */
static int prcm_clear_mod_irqs(s16 module, u8 regs, u32 ignore_bits)
{
        u32 wkst, fclk, iclk, clken;
        u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
        u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
        u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
        u16 grpsel_off = (regs == 3) ?
                OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
        int c = 0;

        wkst = omap2_prm_read_mod_reg(module, wkst_off);
        wkst &= omap2_prm_read_mod_reg(module, grpsel_off);
        wkst &= ~ignore_bits;
        if (wkst) {
                iclk = omap2_cm_read_mod_reg(module, iclk_off);
                fclk = omap2_cm_read_mod_reg(module, fclk_off);
                while (wkst) {
                        clken = wkst;
                        omap2_cm_set_mod_reg_bits(clken, module, iclk_off);
                        /*
                         * For USBHOST, we don't know whether HOST1 or
                         * HOST2 woke us up, so enable both f-clocks
                         */
                        if (module == OMAP3430ES2_USBHOST_MOD)
                                clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
                        omap2_cm_set_mod_reg_bits(clken, module, fclk_off);
                        omap2_prm_write_mod_reg(wkst, module, wkst_off);
                        wkst = omap2_prm_read_mod_reg(module, wkst_off);
                        wkst &= ~ignore_bits;
                        c++;
                }
                omap2_cm_write_mod_reg(iclk, module, iclk_off);
                omap2_cm_write_mod_reg(fclk, module, fclk_off);
        }

        return c;
}

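/*
 * PRCM "io" interrupt: clear only the I/O pad and I/O chain wake-up
 * events from the WKUP module; everything else is left to the wake-up
 * handler below.
 */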
static irqreturn_t _prcm_int_handle_io(int irq, void *unused)
{
        int c;

        c = prcm_clear_mod_irqs(WKUP_MOD, 1,
                ~(OMAP3430_ST_IO_MASK | OMAP3430_ST_IO_CHAIN_MASK));

        return c ? IRQ_HANDLED : IRQ_NONE;
}

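/*
 * PRCM "wkup" interrupt: clear the remaining wake-up events latched in
 * the WKUP, CORE and PER modules, plus the CORE3/USBHOST registers on
 * revisions after ES1.0.
 */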
static irqreturn_t _prcm_int_handle_wakeup(int irq, void *unused)
{
        int c;

        /*
         * Clear all except ST_IO and ST_IO_CHAIN for wkup module,
         * these are handled in a separate handler to avoid acking
         * IO events before parsing in mux code
         */
        c = prcm_clear_mod_irqs(WKUP_MOD, 1,
                OMAP3430_ST_IO_MASK | OMAP3430_ST_IO_CHAIN_MASK);
        c += prcm_clear_mod_irqs(CORE_MOD, 1, 0);
        c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1, 0);
        if (omap_rev() > OMAP3430_REV_ES1_0) {
                c += prcm_clear_mod_irqs(CORE_MOD, 3, 0);
                c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, 0);
        }

        return c ? IRQ_HANDLED : IRQ_NONE;
}

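/*
 * Save the ARM Auxiliary Control register and the L2 Cache Auxiliary
 * Control register into the context area pointed to by @save, for
 * restore on the way back from an MPU off transition.
 */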
static void omap34xx_save_context(u32 *save)
{
        u32 val;

        /* Read Auxiliary Control Register */
        asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val));
        *save++ = 1;
        *save++ = val;

        /* Read L2 AUX ctrl register */
        asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
        *save++ = 1;
        *save++ = val;
}

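/*
 * cpu_suspend() finisher; also called directly when MPU logic/L1
 * context does not need to be saved.
 */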
static int omap34xx_do_sram_idle(unsigned long save_state)
{
        omap34xx_cpu_suspend(save_state);
        return 0;
}

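/*
 * omap_sram_idle - program the next power states and run the idle
 * sequence (context save, WFI from SRAM, context restore) for the MPU,
 * CORE and PER powerdomains.
 */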
void omap_sram_idle(void)
{
        /* Variable to tell what needs to be saved and restored
         * in omap_sram_idle */
        /* save_state = 0 => Nothing to save and restore */
        /* save_state = 1 => Only L1 and logic lost */
        /* save_state = 2 => Only L2 lost */
        /* save_state = 3 => L1, L2 and logic lost */
        int save_state = 0;
        int mpu_next_state = PWRDM_POWER_ON;
        int per_next_state = PWRDM_POWER_ON;
        int core_next_state = PWRDM_POWER_ON;
        int per_going_off;
        int core_prev_state;
        u32 sdrc_pwr = 0;

        mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
        switch (mpu_next_state) {
        case PWRDM_POWER_ON:
        case PWRDM_POWER_RET:
                /* No need to save context */
                save_state = 0;
                break;
        case PWRDM_POWER_OFF:
                save_state = 3;
                break;
        default:
                /* Invalid state */
                pr_err("Invalid mpu state in sram_idle\n");
                return;
        }

        /* NEON control */
        if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
                pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);

        /* Enable IO-PAD and IO-CHAIN wakeups */
        per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
        core_next_state = pwrdm_read_next_pwrst(core_pwrdm);

        pwrdm_pre_transition(NULL);

        /* PER */
        if (per_next_state < PWRDM_POWER_ON) {
                per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
                omap2_gpio_prepare_for_idle(per_going_off);
        }

        /* CORE */
        if (core_next_state < PWRDM_POWER_ON) {
                if (core_next_state == PWRDM_POWER_OFF) {
                        omap3_core_save_context();
                        omap3_cm_save_context();
                }
        }

        omap3_intc_prepare_idle();

        /*
         * On EMU/HS devices ROM code restores an SDRC value
         * from scratchpad which has automatic self refresh on timeout
         * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
         * Hence store/restore the SDRC_POWER register here.
         */
        if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
            (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
             omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
            core_next_state == PWRDM_POWER_OFF)
                sdrc_pwr = sdrc_read_reg(SDRC_POWER);

        /*
         * omap3_arm_context is the location where some ARM context
         * gets saved. The rest is placed on the stack, and restored
         * from there before resuming.
         */
        if (save_state)
                omap34xx_save_context(omap3_arm_context);
        if (save_state == 1 || save_state == 3)
                cpu_suspend(save_state, omap34xx_do_sram_idle);
        else
                omap34xx_do_sram_idle(save_state);

        /* Restore normal SDRC POWER settings */
        if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
            (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
             omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
            core_next_state == PWRDM_POWER_OFF)
                sdrc_write_reg(sdrc_pwr, SDRC_POWER);

        /* CORE */
        if (core_next_state < PWRDM_POWER_ON) {
                core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
                if (core_prev_state == PWRDM_POWER_OFF) {
                        omap3_core_restore_context();
                        omap3_cm_restore_context();
                        omap3_sram_restore_context();
                        omap2_sms_restore_context();
                }
                if (core_next_state == PWRDM_POWER_OFF)
                        omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
                                                     OMAP3430_GR_MOD,
                                                     OMAP3_PRM_VOLTCTRL_OFFSET);
        }
        omap3_intc_resume_idle();

        pwrdm_post_transition(NULL);

        /* PER */
        if (per_next_state < PWRDM_POWER_ON)
                omap2_gpio_resume_after_idle();
}

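/*
 * omap3_pm_idle - arm_pm_idle hook; enter the SRAM idle path unless an
 * interrupt is already pending.
 */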
static void omap3_pm_idle(void)
{
        local_fiq_disable();

        if (omap_irq_pending())
                goto out;

        trace_power_start(POWER_CSTATE, 1, smp_processor_id());
        trace_cpu_idle(1, smp_processor_id());

        omap_sram_idle();

        trace_power_end(smp_processor_id());
        trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());

out:
        local_fiq_enable();
}

#ifdef CONFIG_SUSPEND
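/*
 * omap3_pm_suspend - system suspend path: program each registered
 * powerdomain to its suspend target state, idle, then restore the saved
 * next-power-states and report any domain that missed its target.
 */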
static int omap3_pm_suspend(void)
{
        struct power_state *pwrst;
        int state, ret = 0;

        /* Read current next_pwrsts */
        list_for_each_entry(pwrst, &pwrst_list, node)
                pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
        /* Set ones wanted by suspend */
        list_for_each_entry(pwrst, &pwrst_list, node) {
                if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
                        goto restore;
                if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
                        goto restore;
        }

        omap3_intc_suspend();

        omap_sram_idle();

restore:
        /* Restore next_pwrsts */
        list_for_each_entry(pwrst, &pwrst_list, node) {
                state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
                if (state > pwrst->next_state) {
                        pr_info("Powerdomain (%s) didn't enter target state %d\n",
                                pwrst->pwrdm->name, pwrst->next_state);
                        ret = -1;
                }
                omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
        }
        if (ret)
                pr_err("Could not enter target state in pm_suspend\n");
        else
                pr_info("Successfully put all powerdomains to target state\n");

        return ret;
}

#endif /* CONFIG_SUSPEND */

/**
 * omap3_iva_idle(): ensure IVA is in idle so it can be put into
 *                   retention
 *
 * In cases where IVA2 is activated by bootcode, it may prevent
 * full-chip retention or off-mode because it is not idle. This
 * function forces the IVA2 into idle state so it can go
 * into retention/off and thus allow full-chip retention/off.
 */
static void __init omap3_iva_idle(void)
{
        /* ensure IVA2 clock is disabled */
        omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);

        /* if no clock activity, nothing else to do */
        if (!(omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
              OMAP3430_CLKACTIVITY_IVA2_MASK))
                return;

        /* Reset IVA2 */
        omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
                                OMAP3430_RST2_IVA2_MASK |
                                OMAP3430_RST3_IVA2_MASK,
                                OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

        /* Enable IVA2 clock */
        omap2_cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
                               OMAP3430_IVA2_MOD, CM_FCLKEN);

        /* Set IVA2 boot mode to 'idle' */
        omap_ctrl_writel(OMAP3_IVA2_BOOTMOD_IDLE,
                         OMAP343X_CONTROL_IVA2_BOOTMOD);

        /* Un-reset IVA2 */
        omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

        /* Disable IVA2 clock */
        omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);

        /* Reset IVA2 */
        omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
                                OMAP3430_RST2_IVA2_MASK |
                                OMAP3430_RST3_IVA2_MASK,
                                OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
}

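/*
 * On a standalone OMAP3430 with no stacked modem, configure the D2D
 * MStandby and Idle Ack pads with pull-ups and pulse the modem reset
 * lines.
 */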
static void __init omap3_d2d_idle(void)
{
        u16 mask, padconf;

        /*
         * In a standalone OMAP3430 where there is no stacked modem,
         * the D2D Idle Ack and D2D MStandby must be pulled high. Set
         * CONTROL_PADCONF_SAD2D_IDLEACK and CONTROL_PADCONF_SAD2D_MSTDBY
         * to have a pull up.
         */
        mask = (1 << 4) | (1 << 3); /* pull-up, enabled */
        padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_MSTANDBY);
        padconf |= mask;
        omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_MSTANDBY);

        padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_IDLEACK);
        padconf |= mask;
        omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);

        /* reset modem */
        omap2_prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
                                OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
                                CORE_MOD, OMAP2_RM_RSTCTRL);
        omap2_prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
}

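/*
 * One-time PRCM setup at boot: enable sys_clkreq control of the external
 * oscillator, program the wakeup sources and MPU group selections, clear
 * stale reset/interrupt status bits and idle the IVA2 (if present) and
 * D2D interfaces.
 */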
static void __init prcm_setup_regs(void)
{
        u32 omap3630_en_uart4_mask = cpu_is_omap3630() ?
                                        OMAP3630_EN_UART4_MASK : 0;
        u32 omap3630_grpsel_uart4_mask = cpu_is_omap3630() ?
                                        OMAP3630_GRPSEL_UART4_MASK : 0;

        /* XXX This should be handled by hwmod code or SCM init code */
        omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);

        /*
         * Enable control of external oscillator through
         * sys_clkreq. In the long run clock framework should
         * take care of this.
         */
        omap2_prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
                                   1 << OMAP_AUTOEXTCLKMODE_SHIFT,
                                   OMAP3430_GR_MOD,
                                   OMAP3_PRM_CLKSRC_CTRL_OFFSET);

        /* setup wakeup source */
        omap2_prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
                                OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
                                WKUP_MOD, PM_WKEN);
        /* No need to write EN_IO, that is always enabled */
        omap2_prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
                                OMAP3430_GRPSEL_GPT1_MASK |
                                OMAP3430_GRPSEL_GPT12_MASK,
                                WKUP_MOD, OMAP3430_PM_MPUGRPSEL);

        /* Enable PM_WKEN to support DSS LPR */
        omap2_prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
                                OMAP3430_DSS_MOD, PM_WKEN);

        /* Enable wakeups in PER */
        omap2_prm_write_mod_reg(omap3630_en_uart4_mask |
                        OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
                        OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
                        OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
                        OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK |
                        OMAP3430_EN_MCBSP4_MASK,
                        OMAP3430_PER_MOD, PM_WKEN);
        /* and allow them to wake up MPU */
        omap2_prm_write_mod_reg(omap3630_grpsel_uart4_mask |
                        OMAP3430_GRPSEL_GPIO2_MASK |
                        OMAP3430_GRPSEL_GPIO3_MASK |
                        OMAP3430_GRPSEL_GPIO4_MASK |
                        OMAP3430_GRPSEL_GPIO5_MASK |
                        OMAP3430_GRPSEL_GPIO6_MASK |
                        OMAP3430_GRPSEL_UART3_MASK |
                        OMAP3430_GRPSEL_MCBSP2_MASK |
                        OMAP3430_GRPSEL_MCBSP3_MASK |
                        OMAP3430_GRPSEL_MCBSP4_MASK,
                        OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);

        /* Don't attach IVA interrupts */
        if (omap3_has_iva()) {
                omap2_prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
                omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
                omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
                omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD,
                                        OMAP3430_PM_IVAGRPSEL);
        }

        /* Clear any pending 'reset' flags */
        omap2_prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
        omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
        omap2_prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
        omap2_prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
        omap2_prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
        omap2_prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
        omap2_prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST);

        /* Clear any pending PRCM interrupts */
        omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);

        if (omap3_has_iva())
                omap3_iva_idle();

        omap3_d2d_idle();
}

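/*
 * omap3_pm_off_mode_enable - set every registered powerdomain's next
 * state to OFF (enable != 0) or RET, honouring the erratum i583
 * restriction on CORE off for affected OMAP3630 revisions.
 */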
void omap3_pm_off_mode_enable(int enable)
{
        struct power_state *pwrst;
        u32 state;

        if (enable)
                state = PWRDM_POWER_OFF;
        else
                state = PWRDM_POWER_RET;

        list_for_each_entry(pwrst, &pwrst_list, node) {
                if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
                    pwrst->pwrdm == core_pwrdm &&
                    state == PWRDM_POWER_OFF) {
                        pwrst->next_state = PWRDM_POWER_RET;
                        pr_warn("%s: Core OFF disabled due to errata i583\n",
                                __func__);
                } else {
                        pwrst->next_state = state;
                }
                omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
        }
}

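/* Query or override the suspend target state recorded for a powerdomain */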
int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
{
        struct power_state *pwrst;

        list_for_each_entry(pwrst, &pwrst_list, node) {
                if (pwrst->pwrdm == pwrdm)
                        return pwrst->next_state;
        }
        return -EINVAL;
}

int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
{
        struct power_state *pwrst;

        list_for_each_entry(pwrst, &pwrst_list, node) {
                if (pwrst->pwrdm == pwrdm) {
                        pwrst->next_state = state;
                        return 0;
                }
        }
        return -EINVAL;
}

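/*
 * pwrdm_for_each() callback: register a powerdomain in pwrst_list with a
 * default next state of RET, and enable hardware save-and-restore where
 * the domain supports it.
 */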
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
        struct power_state *pwrst;

        if (!pwrdm->pwrsts)
                return 0;

        pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
        if (!pwrst)
                return -ENOMEM;
        pwrst->pwrdm = pwrdm;
        pwrst->next_state = PWRDM_POWER_RET;
        list_add(&pwrst->node, &pwrst_list);

        if (pwrdm_has_hdwr_sar(pwrdm))
                pwrdm_enable_hdwr_sar(pwrdm);

        return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}

/*
 * Push functions to SRAM
 *
 * The minimum set of functions is pushed to SRAM for execution:
 * - omap3_do_wfi for erratum i581 WA,
 * - save_secure_ram_context for security extensions.
 */
void omap_push_sram_idle(void)
{
        omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);

        if (omap_type() != OMAP2_DEVICE_TYPE_GP)
                _omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
                                save_secure_ram_context_sz);
}

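/*
 * Record the PM errata that apply to the running SoC revision in
 * pm34xx_errata so the workarounds elsewhere in this file can be
 * enabled conditionally.
 */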
static void __init pm_errata_configure(void)
{
        if (cpu_is_omap3630()) {
                pm34xx_errata |= PM_RTA_ERRATUM_i608;
                /* Enable the l2 cache toggling in sleep logic */
                enable_omap3630_toggle_l2_on_restore();
                if (omap_rev() < OMAP3630_REV_ES1_2)
                        pm34xx_errata |= (PM_SDRC_WAKEUP_ERRATUM_i583 |
                                          PM_PER_MEMORIES_ERRATUM_i582);
        } else if (cpu_is_omap34xx()) {
                pm34xx_errata |= PM_PER_MEMORIES_ERRATUM_i582;
        }
}

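/*
 * omap3_pm_init - late PM initialization: program the PRCM, request the
 * PRCM wakeup/IO interrupts, register all powerdomains, apply erratum
 * workarounds and, on HS/EMU devices, save the secure RAM context.
 */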
int __init omap3_pm_init(void)
{
        struct power_state *pwrst, *tmp;
        struct clockdomain *neon_clkdm, *mpu_clkdm, *per_clkdm, *wkup_clkdm;
        int ret;

        if (!omap3_has_io_chain_ctrl())
                pr_warning("PM: no software I/O chain control; some wakeups may be lost\n");

        pm_errata_configure();

        /* XXX prcm_setup_regs needs to be before enabling hw
         * supervised mode for powerdomains */
        prcm_setup_regs();

        ret = request_irq(omap_prcm_event_to_irq("wkup"),
                _prcm_int_handle_wakeup, IRQF_NO_SUSPEND, "pm_wkup", NULL);

        if (ret) {
                pr_err("pm: Failed to request pm_wkup irq\n");
                goto err1;
        }

        /* IO interrupt is shared with mux code */
        ret = request_irq(omap_prcm_event_to_irq("io"),
                _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
                omap3_pm_init);
        enable_irq(omap_prcm_event_to_irq("io"));

        if (ret) {
                pr_err("pm: Failed to request pm_io irq\n");
                goto err2;
        }

        ret = pwrdm_for_each(pwrdms_setup, NULL);
        if (ret) {
                pr_err("Failed to setup powerdomains\n");
                goto err3;
        }

        (void) clkdm_for_each(omap_pm_clkdms_setup, NULL);

        mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
        if (mpu_pwrdm == NULL) {
                pr_err("Failed to get mpu_pwrdm\n");
                ret = -EINVAL;
                goto err3;
        }

        neon_pwrdm = pwrdm_lookup("neon_pwrdm");
        per_pwrdm = pwrdm_lookup("per_pwrdm");
        core_pwrdm = pwrdm_lookup("core_pwrdm");

        neon_clkdm = clkdm_lookup("neon_clkdm");
        mpu_clkdm = clkdm_lookup("mpu_clkdm");
        per_clkdm = clkdm_lookup("per_clkdm");
        wkup_clkdm = clkdm_lookup("wkup_clkdm");

#ifdef CONFIG_SUSPEND
        omap_pm_suspend = omap3_pm_suspend;
#endif

        arm_pm_idle = omap3_pm_idle;
        omap3_idle_init();

        /*
         * RTA is disabled during initialization as per erratum i608;
         * it is safer to disable RTA by the bootloader, but we would like
         * to be doubly sure here and prevent any mishaps.
         */
        if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
                omap3630_ctrl_disable_rta();

        /*
         * The UART3/4 FIFO and the sidetone memory in McBSP2/3 are
         * not correctly reset when the PER powerdomain comes back
         * from OFF or OSWR when the CORE powerdomain is kept active.
         * See OMAP36xx Erratum i582 "PER Domain reset issue after
         * Domain-OFF/OSWR Wakeup". This wakeup dependency is not a
         * complete workaround. The kernel must also prevent the PER
         * powerdomain from going to OSWR/OFF while the CORE
         * powerdomain is not going to OSWR/OFF. And if PER last
         * power state was off while CORE last power state was ON, the
         * UART3/4 and McBSP2/3 SIDETONE devices need to run a
         * self-test using their loopback tests; if that fails, those
         * devices are unusable until the PER/CORE can complete a
         * transition from ON to OSWR/OFF and then back to ON.
         *
         * XXX Technically this workaround is only needed if off-mode
         * or OSWR is enabled.
         */
        if (IS_PM34XX_ERRATUM(PM_PER_MEMORIES_ERRATUM_i582))
                clkdm_add_wkdep(per_clkdm, wkup_clkdm);

        clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
        if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
                omap3_secure_ram_storage =
                        kmalloc(0x803F, GFP_KERNEL);
                if (!omap3_secure_ram_storage)
                        pr_err("Memory allocation failed when allocating for secure sram context\n");

                local_irq_disable();
                local_fiq_disable();

                omap_dma_global_context_save();
                omap3_save_secure_ram_context();
                omap_dma_global_context_restore();

                local_irq_enable();
                local_fiq_enable();
        }

        omap3_save_scratchpad_contents();
        return ret;

err3:
        list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
                list_del(&pwrst->node);
                kfree(pwrst);
        }
        free_irq(omap_prcm_event_to_irq("io"), omap3_pm_init);
err2:
        free_irq(omap_prcm_event_to_irq("wkup"), NULL);
err1:
        return ret;
}