/*
 * Copyright (C) STMicroelectronics 2009
 * Copyright (C) ST-Ericsson SA 2010
 *
 * License Terms: GNU General Public License v2
 * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
 * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
 *
 * U8500 PRCM Unit interface driver
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/irq.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/mfd/core.h>
#include <linux/mfd/db8500-prcmu.h>
#include <linux/regulator/db8500-prcmu.h>
#include <linux/regulator/machine.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/db8500-regs.h>
#include <mach/id.h>
#include "db8500-prcmu-regs.h"

/* Offset for the firmware version within the TCPM */
#define PRCMU_FW_VERSION_OFFSET 0xA4

/* PRCMU project numbers, defined by PRCMU FW */
#define PRCMU_PROJECT_ID_8500V1_0 1
#define PRCMU_PROJECT_ID_8500V2_0 2
#define PRCMU_PROJECT_ID_8400V2_0 3

/* Index of different voltages to be used when accessing AVSData */
#define PRCM_AVS_BASE 0x2FC
#define PRCM_AVS_VBB_RET (PRCM_AVS_BASE + 0x0)
#define PRCM_AVS_VBB_MAX_OPP (PRCM_AVS_BASE + 0x1)
#define PRCM_AVS_VBB_100_OPP (PRCM_AVS_BASE + 0x2)
#define PRCM_AVS_VBB_50_OPP (PRCM_AVS_BASE + 0x3)
#define PRCM_AVS_VARM_MAX_OPP (PRCM_AVS_BASE + 0x4)
#define PRCM_AVS_VARM_100_OPP (PRCM_AVS_BASE + 0x5)
#define PRCM_AVS_VARM_50_OPP (PRCM_AVS_BASE + 0x6)
#define PRCM_AVS_VARM_RET (PRCM_AVS_BASE + 0x7)
#define PRCM_AVS_VAPE_100_OPP (PRCM_AVS_BASE + 0x8)
#define PRCM_AVS_VAPE_50_OPP (PRCM_AVS_BASE + 0x9)
#define PRCM_AVS_VMOD_100_OPP (PRCM_AVS_BASE + 0xA)
#define PRCM_AVS_VMOD_50_OPP (PRCM_AVS_BASE + 0xB)
#define PRCM_AVS_VSAFE (PRCM_AVS_BASE + 0xC)

#define PRCM_AVS_VOLTAGE 0
#define PRCM_AVS_VOLTAGE_MASK 0x3f
#define PRCM_AVS_ISSLOWSTARTUP 6
#define PRCM_AVS_ISSLOWSTARTUP_MASK (1 << PRCM_AVS_ISSLOWSTARTUP)
#define PRCM_AVS_ISMODEENABLE 7
#define PRCM_AVS_ISMODEENABLE_MASK (1 << PRCM_AVS_ISMODEENABLE)

#define PRCM_BOOT_STATUS 0xFFF
#define PRCM_ROMCODE_A2P 0xFFE
#define PRCM_ROMCODE_P2A 0xFFD
#define PRCM_XP70_CUR_PWR_STATE 0xFFC /* 4 BYTES */

#define PRCM_SW_RST_REASON 0xFF8 /* 2 bytes */

#define _PRCM_MBOX_HEADER 0xFE8 /* 16 bytes */
#define PRCM_MBOX_HEADER_REQ_MB0 (_PRCM_MBOX_HEADER + 0x0)
#define PRCM_MBOX_HEADER_REQ_MB1 (_PRCM_MBOX_HEADER + 0x1)
#define PRCM_MBOX_HEADER_REQ_MB2 (_PRCM_MBOX_HEADER + 0x2)
#define PRCM_MBOX_HEADER_REQ_MB3 (_PRCM_MBOX_HEADER + 0x3)
#define PRCM_MBOX_HEADER_REQ_MB4 (_PRCM_MBOX_HEADER + 0x4)
#define PRCM_MBOX_HEADER_REQ_MB5 (_PRCM_MBOX_HEADER + 0x5)
#define PRCM_MBOX_HEADER_ACK_MB0 (_PRCM_MBOX_HEADER + 0x8)

/* Req Mailboxes */
#define PRCM_REQ_MB0 0xFDC /* 12 bytes */
#define PRCM_REQ_MB1 0xFD0 /* 12 bytes */
#define PRCM_REQ_MB2 0xFC0 /* 16 bytes */
#define PRCM_REQ_MB3 0xE4C /* 372 bytes */
#define PRCM_REQ_MB4 0xE48 /* 4 bytes */
#define PRCM_REQ_MB5 0xE44 /* 4 bytes */

/* Ack Mailboxes */
#define PRCM_ACK_MB0 0xE08 /* 52 bytes */
#define PRCM_ACK_MB1 0xE04 /* 4 bytes */
#define PRCM_ACK_MB2 0xE00 /* 4 bytes */
#define PRCM_ACK_MB3 0xDFC /* 4 bytes */
#define PRCM_ACK_MB4 0xDF8 /* 4 bytes */
#define PRCM_ACK_MB5 0xDF4 /* 4 bytes */

/* Mailbox 0 headers */
#define MB0H_POWER_STATE_TRANS 0
#define MB0H_CONFIG_WAKEUPS_EXE 1
#define MB0H_READ_WAKEUP_ACK 3
#define MB0H_CONFIG_WAKEUPS_SLEEP 4

#define MB0H_WAKEUP_EXE 2
#define MB0H_WAKEUP_SLEEP 5

/* Mailbox 0 REQs */
#define PRCM_REQ_MB0_AP_POWER_STATE (PRCM_REQ_MB0 + 0x0)
#define PRCM_REQ_MB0_AP_PLL_STATE (PRCM_REQ_MB0 + 0x1)
#define PRCM_REQ_MB0_ULP_CLOCK_STATE (PRCM_REQ_MB0 + 0x2)
#define PRCM_REQ_MB0_DO_NOT_WFI (PRCM_REQ_MB0 + 0x3)
#define PRCM_REQ_MB0_WAKEUP_8500 (PRCM_REQ_MB0 + 0x4)
#define PRCM_REQ_MB0_WAKEUP_4500 (PRCM_REQ_MB0 + 0x8)

/* Mailbox 0 ACKs */
#define PRCM_ACK_MB0_AP_PWRSTTR_STATUS (PRCM_ACK_MB0 + 0x0)
#define PRCM_ACK_MB0_READ_POINTER (PRCM_ACK_MB0 + 0x1)
#define PRCM_ACK_MB0_WAKEUP_0_8500 (PRCM_ACK_MB0 + 0x4)
#define PRCM_ACK_MB0_WAKEUP_0_4500 (PRCM_ACK_MB0 + 0x8)
#define PRCM_ACK_MB0_WAKEUP_1_8500 (PRCM_ACK_MB0 + 0x1C)
#define PRCM_ACK_MB0_WAKEUP_1_4500 (PRCM_ACK_MB0 + 0x20)
#define PRCM_ACK_MB0_EVENT_4500_NUMBERS 20

/* Mailbox 1 headers */
#define MB1H_ARM_APE_OPP 0x0
#define MB1H_RESET_MODEM 0x2
#define MB1H_REQUEST_APE_OPP_100_VOLT 0x3
#define MB1H_RELEASE_APE_OPP_100_VOLT 0x4
#define MB1H_RELEASE_USB_WAKEUP 0x5

/* Mailbox 1 Requests */
#define PRCM_REQ_MB1_ARM_OPP (PRCM_REQ_MB1 + 0x0)
#define PRCM_REQ_MB1_APE_OPP (PRCM_REQ_MB1 + 0x1)
#define PRCM_REQ_MB1_APE_OPP_100_RESTORE (PRCM_REQ_MB1 + 0x4)
#define PRCM_REQ_MB1_ARM_OPP_100_RESTORE (PRCM_REQ_MB1 + 0x8)

/* Mailbox 1 ACKs */
#define PRCM_ACK_MB1_CURRENT_ARM_OPP (PRCM_ACK_MB1 + 0x0)
#define PRCM_ACK_MB1_CURRENT_APE_OPP (PRCM_ACK_MB1 + 0x1)
#define PRCM_ACK_MB1_APE_VOLTAGE_STATUS (PRCM_ACK_MB1 + 0x2)
#define PRCM_ACK_MB1_DVFS_STATUS (PRCM_ACK_MB1 + 0x3)

/* Mailbox 2 headers */
#define MB2H_DPS 0x0
#define MB2H_AUTO_PWR 0x1

/* Mailbox 2 REQs */
#define PRCM_REQ_MB2_SVA_MMDSP (PRCM_REQ_MB2 + 0x0)
#define PRCM_REQ_MB2_SVA_PIPE (PRCM_REQ_MB2 + 0x1)
#define PRCM_REQ_MB2_SIA_MMDSP (PRCM_REQ_MB2 + 0x2)
#define PRCM_REQ_MB2_SIA_PIPE (PRCM_REQ_MB2 + 0x3)
#define PRCM_REQ_MB2_SGA (PRCM_REQ_MB2 + 0x4)
#define PRCM_REQ_MB2_B2R2_MCDE (PRCM_REQ_MB2 + 0x5)
#define PRCM_REQ_MB2_ESRAM12 (PRCM_REQ_MB2 + 0x6)
#define PRCM_REQ_MB2_ESRAM34 (PRCM_REQ_MB2 + 0x7)
#define PRCM_REQ_MB2_AUTO_PM_SLEEP (PRCM_REQ_MB2 + 0x8)
#define PRCM_REQ_MB2_AUTO_PM_IDLE (PRCM_REQ_MB2 + 0xC)

/* Mailbox 2 ACKs */
#define PRCM_ACK_MB2_DPS_STATUS (PRCM_ACK_MB2 + 0x0)
#define HWACC_PWR_ST_OK 0xFE

/* Mailbox 3 headers */
#define MB3H_ANC 0x0
#define MB3H_SIDETONE 0x1
#define MB3H_SYSCLK 0xE

/* Mailbox 3 Requests */
#define PRCM_REQ_MB3_ANC_FIR_COEFF (PRCM_REQ_MB3 + 0x0)
#define PRCM_REQ_MB3_ANC_IIR_COEFF (PRCM_REQ_MB3 + 0x20)
#define PRCM_REQ_MB3_ANC_SHIFTER (PRCM_REQ_MB3 + 0x60)
#define PRCM_REQ_MB3_ANC_WARP (PRCM_REQ_MB3 + 0x64)
#define PRCM_REQ_MB3_SIDETONE_FIR_GAIN (PRCM_REQ_MB3 + 0x68)
#define PRCM_REQ_MB3_SIDETONE_FIR_COEFF (PRCM_REQ_MB3 + 0x6C)
#define PRCM_REQ_MB3_SYSCLK_MGT (PRCM_REQ_MB3 + 0x16C)

/* Mailbox 4 headers */
#define MB4H_DDR_INIT 0x0
#define MB4H_MEM_ST 0x1
#define MB4H_HOTDOG 0x12
#define MB4H_HOTMON 0x13
#define MB4H_HOT_PERIOD 0x14

/* Mailbox 4 Requests */
#define PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE (PRCM_REQ_MB4 + 0x0)
#define PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE (PRCM_REQ_MB4 + 0x1)
#define PRCM_REQ_MB4_ESRAM0_ST (PRCM_REQ_MB4 + 0x3)
#define PRCM_REQ_MB4_HOTDOG_THRESHOLD (PRCM_REQ_MB4 + 0x0)
#define PRCM_REQ_MB4_HOTMON_LOW (PRCM_REQ_MB4 + 0x0)
#define PRCM_REQ_MB4_HOTMON_HIGH (PRCM_REQ_MB4 + 0x1)
#define PRCM_REQ_MB4_HOTMON_CONFIG (PRCM_REQ_MB4 + 0x2)
#define PRCM_REQ_MB4_HOT_PERIOD (PRCM_REQ_MB4 + 0x0)
#define HOTMON_CONFIG_LOW BIT(0)
#define HOTMON_CONFIG_HIGH BIT(1)

/* Mailbox 5 Requests */
#define PRCM_REQ_MB5_I2C_SLAVE_OP (PRCM_REQ_MB5 + 0x0)
#define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1)
#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2)
#define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3)
#define PRCMU_I2C_WRITE(slave) \
	(((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0))
#define PRCMU_I2C_READ(slave) \
	(((slave) << 1) | BIT(0) | (cpu_is_u8500v2() ? BIT(6) : 0))
#define PRCMU_I2C_STOP_EN BIT(3)

/* Mailbox 5 ACKs */
#define PRCM_ACK_MB5_I2C_STATUS (PRCM_ACK_MB5 + 0x1)
#define PRCM_ACK_MB5_I2C_VAL (PRCM_ACK_MB5 + 0x3)
#define I2C_WR_OK 0x1
#define I2C_RD_OK 0x2

#define NUM_MB 8
#define MBOX_BIT BIT
#define ALL_MBOX_BITS (MBOX_BIT(NUM_MB) - 1)

/*
 * Wakeups/IRQs
 */

#define WAKEUP_BIT_RTC BIT(0)
#define WAKEUP_BIT_RTT0 BIT(1)
#define WAKEUP_BIT_RTT1 BIT(2)
#define WAKEUP_BIT_HSI0 BIT(3)
#define WAKEUP_BIT_HSI1 BIT(4)
#define WAKEUP_BIT_CA_WAKE BIT(5)
#define WAKEUP_BIT_USB BIT(6)
#define WAKEUP_BIT_ABB BIT(7)
#define WAKEUP_BIT_ABB_FIFO BIT(8)
#define WAKEUP_BIT_SYSCLK_OK BIT(9)
#define WAKEUP_BIT_CA_SLEEP BIT(10)
#define WAKEUP_BIT_AC_WAKE_ACK BIT(11)
#define WAKEUP_BIT_SIDE_TONE_OK BIT(12)
#define WAKEUP_BIT_ANC_OK BIT(13)
#define WAKEUP_BIT_SW_ERROR BIT(14)
#define WAKEUP_BIT_AC_SLEEP_ACK BIT(15)
#define WAKEUP_BIT_ARM BIT(17)
#define WAKEUP_BIT_HOTMON_LOW BIT(18)
#define WAKEUP_BIT_HOTMON_HIGH BIT(19)
#define WAKEUP_BIT_MODEM_SW_RESET_REQ BIT(20)
#define WAKEUP_BIT_GPIO0 BIT(23)
#define WAKEUP_BIT_GPIO1 BIT(24)
#define WAKEUP_BIT_GPIO2 BIT(25)
#define WAKEUP_BIT_GPIO3 BIT(26)
#define WAKEUP_BIT_GPIO4 BIT(27)
#define WAKEUP_BIT_GPIO5 BIT(28)
#define WAKEUP_BIT_GPIO6 BIT(29)
#define WAKEUP_BIT_GPIO7 BIT(30)
#define WAKEUP_BIT_GPIO8 BIT(31)

/*
 * This vector maps irq numbers to the bits in the bit field used in
 * communication with the PRCMU firmware.
 *
 * The reason for having this is to keep the irq numbers contiguous even though
 * the bits in the bit field are not. (The bits also have a tendency to move
 * around, to further complicate matters.)
 */
#define IRQ_INDEX(_name) ((IRQ_PRCMU_##_name) - IRQ_PRCMU_BASE)
#define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name)
static u32 prcmu_irq_bit[NUM_PRCMU_WAKEUPS] = {
	IRQ_ENTRY(RTC),
	IRQ_ENTRY(RTT0),
	IRQ_ENTRY(RTT1),
	IRQ_ENTRY(HSI0),
	IRQ_ENTRY(HSI1),
	IRQ_ENTRY(CA_WAKE),
	IRQ_ENTRY(USB),
	IRQ_ENTRY(ABB),
	IRQ_ENTRY(ABB_FIFO),
	IRQ_ENTRY(CA_SLEEP),
	IRQ_ENTRY(ARM),
	IRQ_ENTRY(HOTMON_LOW),
	IRQ_ENTRY(HOTMON_HIGH),
	IRQ_ENTRY(MODEM_SW_RESET_REQ),
	IRQ_ENTRY(GPIO0),
	IRQ_ENTRY(GPIO1),
	IRQ_ENTRY(GPIO2),
	IRQ_ENTRY(GPIO3),
	IRQ_ENTRY(GPIO4),
	IRQ_ENTRY(GPIO5),
	IRQ_ENTRY(GPIO6),
	IRQ_ENTRY(GPIO7),
	IRQ_ENTRY(GPIO8)
};

#define VALID_WAKEUPS (BIT(NUM_PRCMU_WAKEUP_INDICES) - 1)
#define WAKEUP_ENTRY(_name)[PRCMU_WAKEUP_INDEX_##_name] = (WAKEUP_BIT_##_name)
static u32 prcmu_wakeup_bit[NUM_PRCMU_WAKEUP_INDICES] = {
	WAKEUP_ENTRY(RTC),
	WAKEUP_ENTRY(RTT0),
	WAKEUP_ENTRY(RTT1),
	WAKEUP_ENTRY(HSI0),
	WAKEUP_ENTRY(HSI1),
	WAKEUP_ENTRY(USB),
	WAKEUP_ENTRY(ABB),
	WAKEUP_ENTRY(ABB_FIFO),
	WAKEUP_ENTRY(ARM)
};

/*
 * mb0_transfer - state needed for mailbox 0 communication.
 * @lock: The transaction lock.
 * @dbb_irqs_lock: A lock used to handle concurrent access to (parts of)
 *	the request data.
 * @mask_work: Work structure used for (un)masking wakeup interrupts.
 * @req: Request data that need to persist between requests.
 */
static struct {
	spinlock_t lock;
	spinlock_t dbb_irqs_lock;
	struct work_struct mask_work;
	struct mutex ac_wake_lock;
	struct completion ac_wake_work;
	struct {
		u32 dbb_irqs;
		u32 dbb_wakeups;
		u32 abb_events;
	} req;
} mb0_transfer;

/*
 * mb1_transfer - state needed for mailbox 1 communication.
 * @lock: The transaction lock.
 * @work: The transaction completion structure.
 * @ack: Reply ("acknowledge") data.
 */
static struct {
	struct mutex lock;
	struct completion work;
	struct {
		u8 header;
		u8 arm_opp;
		u8 ape_opp;
		u8 ape_voltage_status;
	} ack;
} mb1_transfer;

/*
 * mb2_transfer - state needed for mailbox 2 communication.
 * @lock: The transaction lock.
 * @work: The transaction completion structure.
 * @auto_pm_lock: The autonomous power management configuration lock.
 * @auto_pm_enabled: A flag indicating whether autonomous PM is enabled.
 * @req: Request data that need to persist between requests.
 * @ack: Reply ("acknowledge") data.
 */
static struct {
	struct mutex lock;
	struct completion work;
	spinlock_t auto_pm_lock;
	bool auto_pm_enabled;
	struct {
		u8 status;
	} ack;
} mb2_transfer;

/*
 * mb3_transfer - state needed for mailbox 3 communication.
 * @lock: The request lock.
 * @sysclk_lock: A lock used to handle concurrent sysclk requests.
 * @sysclk_work: Work structure used for sysclk requests.
 */
static struct {
	spinlock_t lock;
	struct mutex sysclk_lock;
	struct completion sysclk_work;
} mb3_transfer;

/*
 * mb4_transfer - state needed for mailbox 4 communication.
 * @lock: The transaction lock.
 * @work: The transaction completion structure.
 */
static struct {
	struct mutex lock;
	struct completion work;
} mb4_transfer;

/*
 * mb5_transfer - state needed for mailbox 5 communication.
 * @lock: The transaction lock.
 * @work: The transaction completion structure.
 * @ack: Reply ("acknowledge") data.
 */
static struct {
	struct mutex lock;
	struct completion work;
	struct {
		u8 status;
		u8 value;
	} ack;
} mb5_transfer;

static atomic_t ac_wake_req_state = ATOMIC_INIT(0);

/* Spinlocks */
static DEFINE_SPINLOCK(clkout_lock);
static DEFINE_SPINLOCK(gpiocr_lock);

/* Global var to runtime determine TCDM base for v2 or v1 */
static __iomem void *tcdm_base;

struct clk_mgt {
	unsigned int offset;
	u32 pllsw;
};

static DEFINE_SPINLOCK(clk_mgt_lock);

#define CLK_MGT_ENTRY(_name)[PRCMU_##_name] = { (PRCM_##_name##_MGT), 0 }
struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
	CLK_MGT_ENTRY(SGACLK),
	CLK_MGT_ENTRY(UARTCLK),
	CLK_MGT_ENTRY(MSP02CLK),
	CLK_MGT_ENTRY(MSP1CLK),
	CLK_MGT_ENTRY(I2CCLK),
	CLK_MGT_ENTRY(SDMMCCLK),
	CLK_MGT_ENTRY(SLIMCLK),
	CLK_MGT_ENTRY(PER1CLK),
	CLK_MGT_ENTRY(PER2CLK),
	CLK_MGT_ENTRY(PER3CLK),
	CLK_MGT_ENTRY(PER5CLK),
	CLK_MGT_ENTRY(PER6CLK),
	CLK_MGT_ENTRY(PER7CLK),
	CLK_MGT_ENTRY(LCDCLK),
	CLK_MGT_ENTRY(BMLCLK),
	CLK_MGT_ENTRY(HSITXCLK),
	CLK_MGT_ENTRY(HSIRXCLK),
	CLK_MGT_ENTRY(HDMICLK),
	CLK_MGT_ENTRY(APEATCLK),
	CLK_MGT_ENTRY(APETRACECLK),
	CLK_MGT_ENTRY(MCDECLK),
	CLK_MGT_ENTRY(IPI2CCLK),
	CLK_MGT_ENTRY(DSIALTCLK),
	CLK_MGT_ENTRY(DMACLK),
	CLK_MGT_ENTRY(B2R2CLK),
	CLK_MGT_ENTRY(TVCLK),
	CLK_MGT_ENTRY(SSPCLK),
	CLK_MGT_ENTRY(RNGCLK),
	CLK_MGT_ENTRY(UICCCLK),
};

/*
 * Used by MCDE to setup all necessary PRCMU registers
 */
#define PRCMU_RESET_DSIPLL 0x00004000
#define PRCMU_UNCLAMP_DSIPLL 0x00400800

#define PRCMU_CLK_PLL_DIV_SHIFT 0
#define PRCMU_CLK_PLL_SW_SHIFT 5
#define PRCMU_CLK_38 (1 << 9)
#define PRCMU_CLK_38_SRC (1 << 10)
#define PRCMU_CLK_38_DIV (1 << 11)

/* PLLDIV=12, PLLSW=4 (PLLDDR) */
#define PRCMU_DSI_CLOCK_SETTING 0x0000008C

/* PLLDIV=8, PLLSW=4 (PLLDDR) */
#define PRCMU_DSI_CLOCK_SETTING_U8400 0x00000088

/* DPI 50000000 Hz */
#define PRCMU_DPI_CLOCK_SETTING ((1 << PRCMU_CLK_PLL_SW_SHIFT) | \
	(16 << PRCMU_CLK_PLL_DIV_SHIFT))
#define PRCMU_DSI_LP_CLOCK_SETTING 0x00000E00

/* D=101, N=1, R=4, SELDIV2=0 */
#define PRCMU_PLLDSI_FREQ_SETTING 0x00040165

/* D=70, N=1, R=3, SELDIV2=0 */
#define PRCMU_PLLDSI_FREQ_SETTING_U8400 0x00030146

#define PRCMU_ENABLE_PLLDSI 0x00000001
#define PRCMU_DISABLE_PLLDSI 0x00000000
#define PRCMU_RELEASE_RESET_DSS 0x0000400C
#define PRCMU_DSI_PLLOUT_SEL_SETTING 0x00000202
/* ESC clk, div0=1, div1=1, div2=3 */
#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV 0x07030101
#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV 0x00030101
#define PRCMU_DSI_RESET_SW 0x00000007

#define PRCMU_PLLDSI_LOCKP_LOCKED 0x3

static struct {
	u8 project_number;
	u8 api_version;
	u8 func_version;
	u8 errata;
} prcmu_version;

int prcmu_enable_dsipll(void)
{
	int i;
	unsigned int plldsifreq;

	/* Clear DSIPLL_RESETN */
	writel(PRCMU_RESET_DSIPLL, (_PRCMU_BASE + PRCM_APE_RESETN_CLR));
	/* Unclamp DSIPLL in/out */
	writel(PRCMU_UNCLAMP_DSIPLL, (_PRCMU_BASE + PRCM_MMIP_LS_CLAMP_CLR));

	if (prcmu_is_u8400())
		plldsifreq = PRCMU_PLLDSI_FREQ_SETTING_U8400;
	else
		plldsifreq = PRCMU_PLLDSI_FREQ_SETTING;
	/* Set DSI PLL FREQ */
	writel(plldsifreq, (_PRCMU_BASE + PRCM_PLLDSI_FREQ));
	writel(PRCMU_DSI_PLLOUT_SEL_SETTING,
		(_PRCMU_BASE + PRCM_DSI_PLLOUT_SEL));
	/* Enable Escape clocks */
	writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV,
		(_PRCMU_BASE + PRCM_DSITVCLK_DIV));

	/* Start DSI PLL */
	writel(PRCMU_ENABLE_PLLDSI, (_PRCMU_BASE + PRCM_PLLDSI_ENABLE));
	/* Reset DSI PLL */
	writel(PRCMU_DSI_RESET_SW, (_PRCMU_BASE + PRCM_DSI_SW_RESET));
	for (i = 0; i < 10; i++) {
		if ((readl(_PRCMU_BASE + PRCM_PLLDSI_LOCKP) &
			PRCMU_PLLDSI_LOCKP_LOCKED)
				== PRCMU_PLLDSI_LOCKP_LOCKED)
			break;
		udelay(100);
	}
	/* Set DSIPLL_RESETN */
	writel(PRCMU_RESET_DSIPLL, (_PRCMU_BASE + PRCM_APE_RESETN_SET));
	return 0;
}

int prcmu_disable_dsipll(void)
{
	/* Disable dsi pll */
	writel(PRCMU_DISABLE_PLLDSI, (_PRCMU_BASE + PRCM_PLLDSI_ENABLE));
	/* Disable escapeclock */
	writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV,
		(_PRCMU_BASE + PRCM_DSITVCLK_DIV));
	return 0;
}

int prcmu_set_display_clocks(void)
{
	unsigned long flags;
	unsigned int dsiclk;

	if (prcmu_is_u8400())
		dsiclk = PRCMU_DSI_CLOCK_SETTING_U8400;
	else
		dsiclk = PRCMU_DSI_CLOCK_SETTING;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	writel(dsiclk, (_PRCMU_BASE + PRCM_HDMICLK_MGT));
	writel(PRCMU_DSI_LP_CLOCK_SETTING, (_PRCMU_BASE + PRCM_TVCLK_MGT));
	writel(PRCMU_DPI_CLOCK_SETTING, (_PRCMU_BASE + PRCM_LCDCLK_MGT));

	/* Release the HW semaphore. */
	writel(0, (_PRCMU_BASE + PRCM_SEM));

	spin_unlock_irqrestore(&clk_mgt_lock, flags);

	return 0;
}

/**
 * prcmu_enable_spi2 - Enables pin muxing for SPI2 on OtherAlternateC1.
 */
void prcmu_enable_spi2(void)
{
	u32 reg;
	unsigned long flags;

	spin_lock_irqsave(&gpiocr_lock, flags);
	reg = readl(_PRCMU_BASE + PRCM_GPIOCR);
	writel(reg | PRCM_GPIOCR_SPI2_SELECT, _PRCMU_BASE + PRCM_GPIOCR);
	spin_unlock_irqrestore(&gpiocr_lock, flags);
}

/**
 * prcmu_disable_spi2 - Disables pin muxing for SPI2 on OtherAlternateC1.
 */
void prcmu_disable_spi2(void)
{
	u32 reg;
	unsigned long flags;

	spin_lock_irqsave(&gpiocr_lock, flags);
	reg = readl(_PRCMU_BASE + PRCM_GPIOCR);
	writel(reg & ~PRCM_GPIOCR_SPI2_SELECT, _PRCMU_BASE + PRCM_GPIOCR);
	spin_unlock_irqrestore(&gpiocr_lock, flags);
}

bool prcmu_has_arm_maxopp(void)
{
	return (readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP) &
		PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK;
}

bool prcmu_is_u8400(void)
{
	return prcmu_version.project_number == PRCMU_PROJECT_ID_8400V2_0;
}

/**
 * prcmu_get_boot_status - PRCMU boot status checking
 * Returns: the current PRCMU boot status
 */
int prcmu_get_boot_status(void)
{
	return readb(tcdm_base + PRCM_BOOT_STATUS);
}

/**
 * prcmu_set_rc_a2p - This function is used to run a few power state sequences
 * @val: Value to be set, i.e. transition requested
 * Returns: 0 on success, -EINVAL on invalid argument
 *
 * This function is used to run the following power state sequences -
 * any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
 */
int prcmu_set_rc_a2p(enum romcode_write val)
{
	if (val < RDY_2_DS || val > RDY_2_XP70_RST)
		return -EINVAL;
	writeb(val, (tcdm_base + PRCM_ROMCODE_A2P));
	return 0;
}

/**
 * prcmu_get_rc_p2a - This function is used to get power state sequences
 * Returns: the power transition that has last happened
 *
 * This function can return the following transitions -
 * any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
 */
enum romcode_read prcmu_get_rc_p2a(void)
{
	return readb(tcdm_base + PRCM_ROMCODE_P2A);
}

/**
 * prcmu_get_xp70_current_state - Return the current XP70 power mode
 * Returns: Returns the current AP(ARM) power mode: init,
 * apBoot, apExecute, apDeepSleep, apSleep, apIdle, apReset
 */
enum ap_pwrst prcmu_get_xp70_current_state(void)
{
	return readb(tcdm_base + PRCM_XP70_CUR_PWR_STATE);
}

/**
 * prcmu_config_clkout - Configure one of the programmable clock outputs.
 * @clkout: The CLKOUT number (0 or 1).
 * @source: The clock to be used (one of the PRCMU_CLKSRC_*).
 * @div: The divider to be applied.
 *
 * Configures one of the programmable clock outputs (CLKOUTs).
 * @div should be in the range [1,63] to request a configuration, or 0 to
 * inform that the configuration is no longer requested.
 */
int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
{
	static int requests[2];
	int r = 0;
	unsigned long flags;
	u32 val;
	u32 bits;
	u32 mask;
	u32 div_mask;

	BUG_ON(clkout > 1);
	BUG_ON(div > 63);
	BUG_ON((clkout == 0) && (source > PRCMU_CLKSRC_CLK009));

	if (!div && !requests[clkout])
		return -EINVAL;

	switch (clkout) {
	case 0:
		div_mask = PRCM_CLKOCR_CLKODIV0_MASK;
		mask = (PRCM_CLKOCR_CLKODIV0_MASK | PRCM_CLKOCR_CLKOSEL0_MASK);
		bits = ((source << PRCM_CLKOCR_CLKOSEL0_SHIFT) |
			(div << PRCM_CLKOCR_CLKODIV0_SHIFT));
		break;
	case 1:
		div_mask = PRCM_CLKOCR_CLKODIV1_MASK;
		mask = (PRCM_CLKOCR_CLKODIV1_MASK | PRCM_CLKOCR_CLKOSEL1_MASK |
			PRCM_CLKOCR_CLK1TYPE);
		bits = ((source << PRCM_CLKOCR_CLKOSEL1_SHIFT) |
			(div << PRCM_CLKOCR_CLKODIV1_SHIFT));
		break;
	}
	bits &= mask;

	spin_lock_irqsave(&clkout_lock, flags);

	val = readl(_PRCMU_BASE + PRCM_CLKOCR);
	if (val & div_mask) {
		if (div) {
			if ((val & mask) != bits) {
				r = -EBUSY;
				goto unlock_and_return;
			}
		} else {
			if ((val & mask & ~div_mask) != bits) {
				r = -EINVAL;
				goto unlock_and_return;
			}
		}
	}
	writel((bits | (val & ~mask)), (_PRCMU_BASE + PRCM_CLKOCR));
	requests[clkout] += (div ? 1 : -1);

unlock_and_return:
	spin_unlock_irqrestore(&clkout_lock, flags);

	return r;
}

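/*
 * Illustrative usage sketch (not part of the driver): a board file or a
 * consumer driver could pair a request and a release of CLKOUT0 as below.
 * PRCMU_CLKSRC_CLK009 is used only because it is referenced in the range
 * check above; the divider value 8 is a placeholder.
 */
#if 0
static int example_clkout0_request(void)
{
	int r;

	/* Request CLKOUT0: source CLK009, divide by 8. */
	r = prcmu_config_clkout(0, PRCMU_CLKSRC_CLK009, 8);
	if (r)
		return r;

	/* ... use the clock output ... */

	/* Drop the request again (div == 0 releases it). */
	return prcmu_config_clkout(0, PRCMU_CLKSRC_CLK009, 0);
}
#endif
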
int prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
{
	unsigned long flags;

	BUG_ON((state < PRCMU_AP_SLEEP) || (PRCMU_AP_DEEP_IDLE < state));

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
		cpu_relax();

	writeb(MB0H_POWER_STATE_TRANS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
	writeb(state, (tcdm_base + PRCM_REQ_MB0_AP_POWER_STATE));
	writeb((keep_ap_pll ? 1 : 0), (tcdm_base + PRCM_REQ_MB0_AP_PLL_STATE));
	writeb((keep_ulp_clk ? 1 : 0),
		(tcdm_base + PRCM_REQ_MB0_ULP_CLOCK_STATE));
	writeb(0, (tcdm_base + PRCM_REQ_MB0_DO_NOT_WFI));
	writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);

	return 0;
}

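/*
 * Illustrative usage sketch (not part of the driver): platform suspend or
 * cpuidle code programs the requested AP power state through mailbox 0
 * shortly before the cores stop. PRCMU_AP_SLEEP is taken from the BUG_ON()
 * range check above; whether the ULP clock and the AP PLL are kept is a
 * platform decision and the values here are placeholders.
 */
#if 0
static void example_enter_ap_sleep(void)
{
	/* Ask the PRCMU firmware for ApSleep, keeping ULP clock and AP PLL. */
	(void)prcmu_set_power_state(PRCMU_AP_SLEEP, true, true);

	/*
	 * The transition is only taken once the ARM cores execute WFI;
	 * the platform idle/suspend code is responsible for that step.
	 */
}
#endif
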
/* This function should only be called while mb0_transfer.lock is held. */
static void config_wakeups(void)
{
	const u8 header[2] = {
		MB0H_CONFIG_WAKEUPS_EXE,
		MB0H_CONFIG_WAKEUPS_SLEEP
	};
	static u32 last_dbb_events;
	static u32 last_abb_events;
	u32 dbb_events;
	u32 abb_events;
	unsigned int i;

	dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups;
	dbb_events |= (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK);

	abb_events = mb0_transfer.req.abb_events;

	if ((dbb_events == last_dbb_events) && (abb_events == last_abb_events))
		return;

	for (i = 0; i < 2; i++) {
		while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
			cpu_relax();
		writel(dbb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_8500));
		writel(abb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_4500));
		writeb(header[i], (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
		writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
	}
	last_dbb_events = dbb_events;
	last_abb_events = abb_events;
}

void prcmu_enable_wakeups(u32 wakeups)
{
	unsigned long flags;
	u32 bits;
	int i;

	BUG_ON(wakeups != (wakeups & VALID_WAKEUPS));

	for (i = 0, bits = 0; i < NUM_PRCMU_WAKEUP_INDICES; i++) {
		if (wakeups & BIT(i))
			bits |= prcmu_wakeup_bit[i];
	}

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	mb0_transfer.req.dbb_wakeups = bits;
	config_wakeups();

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}

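/*
 * Illustrative usage sketch (not part of the driver): suspend code would
 * typically select which events may wake the system before requesting a
 * low power state. The PRCMU_WAKEUP() helper is assumed to come from
 * <linux/mfd/db8500-prcmu.h> and to expand to BIT(PRCMU_WAKEUP_INDEX_*),
 * matching the index names used in prcmu_wakeup_bit[] above.
 */
#if 0
static void example_select_wakeups(void)
{
	/* Allow wakeup on RTC alarms and ABB (AB8500) interrupts only. */
	prcmu_enable_wakeups(PRCMU_WAKEUP(RTC) | PRCMU_WAKEUP(ABB));

	/* Passing 0 disables all configurable wakeup sources again. */
	prcmu_enable_wakeups(0);
}
#endif
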
void prcmu_config_abb_event_readout(u32 abb_events)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	mb0_transfer.req.abb_events = abb_events;
	config_wakeups();

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}

void prcmu_get_abb_event_buffer(void __iomem **buf)
{
	if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
		*buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_1_4500);
	else
		*buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_0_4500);
}

/**
 * prcmu_set_arm_opp - set the appropriate ARM OPP
 * @opp: The new ARM operating point to which transition is to be made
 * Returns: 0 on success, non-zero on failure
 *
 * This function sets the operating point of the ARM.
 */
int prcmu_set_arm_opp(u8 opp)
{
	int r;

	if (opp < ARM_NO_CHANGE || opp > ARM_EXTCLK)
		return -EINVAL;

	r = 0;

	mutex_lock(&mb1_transfer.lock);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writeb(opp, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
	writeb(APE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_APE_OPP));

	writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
	wait_for_completion(&mb1_transfer.work);

	if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
		(mb1_transfer.ack.arm_opp != opp))
		r = -EIO;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/**
 * prcmu_get_arm_opp - get the current ARM OPP
 *
 * Returns: the current ARM OPP
 */
int prcmu_get_arm_opp(void)
{
	return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_ARM_OPP);
}

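/*
 * Illustrative usage sketch (not part of the driver): the cpufreq driver
 * built on top of this interface changes the ARM OPP and can read it back
 * for verification. ARM_100_OPP is assumed to be one of the OPP values
 * defined in <linux/mfd/db8500-prcmu.h> alongside ARM_NO_CHANGE and
 * ARM_EXTCLK, which appear in the range check above.
 */
#if 0
static int example_switch_arm_opp(void)
{
	int r;

	r = prcmu_set_arm_opp(ARM_100_OPP);	/* blocks on the mailbox 1 ack */
	if (r)
		return r;

	return (prcmu_get_arm_opp() == ARM_100_OPP) ? 0 : -EIO;
}
#endif
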
/**
 * prcmu_get_ddr_opp - get the current DDR OPP
 *
 * Returns: the current DDR OPP
 */
int prcmu_get_ddr_opp(void)
{
	return readb(_PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW);
}

/**
 * prcmu_set_ddr_opp - set the appropriate DDR OPP
 * @opp: The new DDR operating point to which transition is to be made
 * Returns: 0 on success, non-zero on failure
 *
 * This function sets the operating point of the DDR.
 */
int prcmu_set_ddr_opp(u8 opp)
{
	if (opp < DDR_100_OPP || opp > DDR_25_OPP)
		return -EINVAL;
	/* Changing the DDR OPP can hang the hardware pre-v21 */
	if (cpu_is_u8500v20_or_later() && !cpu_is_u8500v20())
		writeb(opp, (_PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW));

	return 0;
}

/**
 * prcmu_set_ape_opp - set the appropriate APE OPP
 * @opp: The new APE operating point to which transition is to be made
 * Returns: 0 on success, non-zero on failure
 *
 * This function sets the operating point of the APE.
 */
int prcmu_set_ape_opp(u8 opp)
{
	int r = 0;

	mutex_lock(&mb1_transfer.lock);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
	writeb(opp, (tcdm_base + PRCM_REQ_MB1_APE_OPP));

	writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
	wait_for_completion(&mb1_transfer.work);

	if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
		(mb1_transfer.ack.ape_opp != opp))
		r = -EIO;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/**
 * prcmu_get_ape_opp - get the current APE OPP
 *
 * Returns: the current APE OPP
 */
int prcmu_get_ape_opp(void)
{
	return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_APE_OPP);
}

/**
 * prcmu_request_ape_opp_100_voltage - Request APE OPP 100% voltage
 * @enable: true to request the higher voltage, false to drop a request.
 *
 * Calls to this function to enable and disable requests must be balanced.
 */
int prcmu_request_ape_opp_100_voltage(bool enable)
{
	int r = 0;
	u8 header;
	static unsigned int requests;

	mutex_lock(&mb1_transfer.lock);

	if (enable) {
		if (0 != requests++)
			goto unlock_and_return;
		header = MB1H_REQUEST_APE_OPP_100_VOLT;
	} else {
		if (requests == 0) {
			r = -EIO;
			goto unlock_and_return;
		} else if (1 != requests--) {
			goto unlock_and_return;
		}
		header = MB1H_RELEASE_APE_OPP_100_VOLT;
	}

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(header, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));

	writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
	wait_for_completion(&mb1_transfer.work);

	if ((mb1_transfer.ack.header != header) ||
		((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
		r = -EIO;

unlock_and_return:
	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/**
 * prcmu_release_usb_wakeup_state - release the state required by a USB wakeup
 *
 * This function releases the power state requirements of a USB wakeup.
 */
int prcmu_release_usb_wakeup_state(void)
{
	int r = 0;

	mutex_lock(&mb1_transfer.lock);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_RELEASE_USB_WAKEUP,
		(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));

	writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
	wait_for_completion(&mb1_transfer.work);

	if ((mb1_transfer.ack.header != MB1H_RELEASE_USB_WAKEUP) ||
		((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
		r = -EIO;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/**
 * prcmu_set_epod - set the state of a EPOD (power domain)
 * @epod_id: The EPOD to set
 * @epod_state: The new EPOD state
 *
 * This function sets the state of a EPOD (power domain). It may not be called
 * from interrupt context.
 */
int prcmu_set_epod(u16 epod_id, u8 epod_state)
{
	int r = 0;
	bool ram_retention = false;
	int i;

	/* check argument */
	BUG_ON(epod_id >= NUM_EPOD_ID);

	/* set flag if retention is possible */
	switch (epod_id) {
	case EPOD_ID_SVAMMDSP:
	case EPOD_ID_SIAMMDSP:
	case EPOD_ID_ESRAM12:
	case EPOD_ID_ESRAM34:
		ram_retention = true;
		break;
	}

	/* check argument */
	BUG_ON(epod_state > EPOD_STATE_ON);
	BUG_ON(epod_state == EPOD_STATE_RAMRET && !ram_retention);

	/* get lock */
	mutex_lock(&mb2_transfer.lock);

	/* wait for mailbox */
	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
		cpu_relax();

	/* fill in mailbox */
	for (i = 0; i < NUM_EPOD_ID; i++)
		writeb(EPOD_STATE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB2 + i));
	writeb(epod_state, (tcdm_base + PRCM_REQ_MB2 + epod_id));

	writeb(MB2H_DPS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB2));

	writel(MBOX_BIT(2), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));

	/*
	 * The current firmware version does not handle errors correctly,
	 * and we cannot recover if there is an error.
	 * This is expected to change when the firmware is updated.
	 */
	if (!wait_for_completion_timeout(&mb2_transfer.work,
			msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
		goto unlock_and_return;
	}

	if (mb2_transfer.ack.status != HWACC_PWR_ST_OK)
		r = -EIO;

unlock_and_return:
	mutex_unlock(&mb2_transfer.lock);
	return r;
}

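/*
 * Illustrative usage sketch (not part of the driver): the
 * "db8500-prcmu-regulators" sub-device registered below drives the power
 * domains through this call. Only identifiers that already appear above
 * (EPOD_ID_SVAMMDSP, EPOD_STATE_ON, EPOD_STATE_RAMRET) are used, and error
 * handling is reduced to passing the return code on.
 */
#if 0
static int example_sva_mmdsp_power_cycle(void)
{
	int r;

	/* Power the SVA MMDSP domain up (mailbox 2 "DPS" request). */
	r = prcmu_set_epod(EPOD_ID_SVAMMDSP, EPOD_STATE_ON);
	if (r)
		return r;

	/* ... run the accelerator ..., then drop to RAM retention. */
	return prcmu_set_epod(EPOD_ID_SVAMMDSP, EPOD_STATE_RAMRET);
}
#endif
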
/**
 * prcmu_configure_auto_pm - Configure autonomous power management.
 * @sleep: Configuration for ApSleep.
 * @idle: Configuration for ApIdle.
 */
void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
	struct prcmu_auto_pm_config *idle)
{
	u32 sleep_cfg;
	u32 idle_cfg;
	unsigned long flags;

	BUG_ON((sleep == NULL) || (idle == NULL));

	sleep_cfg = (sleep->sva_auto_pm_enable & 0xF);
	sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_auto_pm_enable & 0xF));
	sleep_cfg = ((sleep_cfg << 8) | (sleep->sva_power_on & 0xFF));
	sleep_cfg = ((sleep_cfg << 8) | (sleep->sia_power_on & 0xFF));
	sleep_cfg = ((sleep_cfg << 4) | (sleep->sva_policy & 0xF));
	sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_policy & 0xF));

	idle_cfg = (idle->sva_auto_pm_enable & 0xF);
	idle_cfg = ((idle_cfg << 4) | (idle->sia_auto_pm_enable & 0xF));
	idle_cfg = ((idle_cfg << 8) | (idle->sva_power_on & 0xFF));
	idle_cfg = ((idle_cfg << 8) | (idle->sia_power_on & 0xFF));
	idle_cfg = ((idle_cfg << 4) | (idle->sva_policy & 0xF));
	idle_cfg = ((idle_cfg << 4) | (idle->sia_policy & 0xF));

	spin_lock_irqsave(&mb2_transfer.auto_pm_lock, flags);

	/*
	 * The autonomous power management configuration is done through
	 * fields in mailbox 2, but these fields are only used as shared
	 * variables - i.e. there is no need to send a message.
	 */
	writel(sleep_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_SLEEP));
	writel(idle_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_IDLE));

	mb2_transfer.auto_pm_enabled =
		((sleep->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
		 (sleep->sia_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
		 (idle->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
		 (idle->sia_auto_pm_enable == PRCMU_AUTO_PM_ON));

	spin_unlock_irqrestore(&mb2_transfer.auto_pm_lock, flags);
}
EXPORT_SYMBOL(prcmu_configure_auto_pm);

bool prcmu_is_auto_pm_enabled(void)
{
	return mb2_transfer.auto_pm_enabled;
}

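/*
 * Illustrative usage sketch (not part of the driver): platform idle code
 * fills two struct prcmu_auto_pm_config instances (one for ApSleep, one for
 * ApIdle) and hands them to prcmu_configure_auto_pm(). Only the field names
 * and PRCMU_AUTO_PM_ON, which appear above, are relied on; the zero
 * power-on masks and policies are placeholders.
 */
#if 0
static void example_enable_auto_pm(void)
{
	struct prcmu_auto_pm_config sleep_cfg = {
		.sva_auto_pm_enable = PRCMU_AUTO_PM_ON,
		.sia_auto_pm_enable = PRCMU_AUTO_PM_ON,
		.sva_power_on = 0,
		.sia_power_on = 0,
		.sva_policy = 0,
		.sia_policy = 0,
	};
	struct prcmu_auto_pm_config idle_cfg = sleep_cfg;

	prcmu_configure_auto_pm(&sleep_cfg, &idle_cfg);
	WARN_ON(!prcmu_is_auto_pm_enabled());
}
#endif
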
static int request_sysclk(bool enable)
{
	int r;
	unsigned long flags;

	r = 0;

	mutex_lock(&mb3_transfer.sysclk_lock);

	spin_lock_irqsave(&mb3_transfer.lock, flags);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(3))
		cpu_relax();

	writeb((enable ? ON : OFF), (tcdm_base + PRCM_REQ_MB3_SYSCLK_MGT));

	writeb(MB3H_SYSCLK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB3));
	writel(MBOX_BIT(3), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));

	spin_unlock_irqrestore(&mb3_transfer.lock, flags);

	/*
	 * The firmware only sends an ACK if we want to enable the
	 * SysClk, and it succeeds.
	 */
	if (enable && !wait_for_completion_timeout(&mb3_transfer.sysclk_work,
			msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
	}

	mutex_unlock(&mb3_transfer.sysclk_lock);

	return r;
}

static int request_timclk(bool enable)
{
	u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);

	if (!enable)
		val |= PRCM_TCR_STOP_TIMERS;
	writel(val, (_PRCMU_BASE + PRCM_TCR));

	return 0;
}

static int request_reg_clock(u8 clock, bool enable)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
	if (enable) {
		val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
	} else {
		clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
		val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
	}
	writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));

	/* Release the HW semaphore. */
	writel(0, (_PRCMU_BASE + PRCM_SEM));

	spin_unlock_irqrestore(&clk_mgt_lock, flags);

	return 0;
}

/**
 * prcmu_request_clock() - Request for a clock to be enabled or disabled.
 * @clock: The clock for which the request is made.
 * @enable: Whether the clock should be enabled (true) or disabled (false).
 *
 * This function should only be used by the clock implementation.
 * Do not use it from any other place!
 */
int prcmu_request_clock(u8 clock, bool enable)
{
	if (clock < PRCMU_NUM_REG_CLOCKS)
		return request_reg_clock(clock, enable);
	else if (clock == PRCMU_TIMCLK)
		return request_timclk(enable);
	else if (clock == PRCMU_SYSCLK)
		return request_sysclk(enable);
	else
		return -EINVAL;
}

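/*
 * Illustrative usage sketch (not part of the driver): the mach-ux500 clock
 * implementation is the intended caller and simply forwards its
 * enable/disable operations here. PRCMU_SGACLK and PRCMU_SYSCLK are used
 * because they already appear above; the clk framework glue is omitted.
 */
#if 0
static int example_gate_clocks(void)
{
	int r;

	r = prcmu_request_clock(PRCMU_SGACLK, true);	/* PRCM_SGACLK_MGT */
	if (r)
		return r;
	r = prcmu_request_clock(PRCMU_SYSCLK, true);	/* mailbox 3 request */
	if (r)
		return r;

	prcmu_request_clock(PRCMU_SYSCLK, false);
	return prcmu_request_clock(PRCMU_SGACLK, false);
}
#endif
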
int prcmu_config_esram0_deep_sleep(u8 state)
{
	if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
	    (state < ESRAM0_DEEP_SLEEP_STATE_OFF))
		return -EINVAL;

	mutex_lock(&mb4_transfer.lock);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(MB4H_MEM_ST, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
	writeb(((DDR_PWR_STATE_OFFHIGHLAT << 4) | DDR_PWR_STATE_ON),
	       (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE));
	writeb(DDR_PWR_STATE_ON,
	       (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE));
	writeb(state, (tcdm_base + PRCM_REQ_MB4_ESRAM0_ST));

	writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}

int prcmu_config_hotdog(u8 threshold)
{
	mutex_lock(&mb4_transfer.lock);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(threshold, (tcdm_base + PRCM_REQ_MB4_HOTDOG_THRESHOLD));
	writeb(MB4H_HOTDOG, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}

int prcmu_config_hotmon(u8 low, u8 high)
{
	mutex_lock(&mb4_transfer.lock);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(low, (tcdm_base + PRCM_REQ_MB4_HOTMON_LOW));
	writeb(high, (tcdm_base + PRCM_REQ_MB4_HOTMON_HIGH));
	writeb((HOTMON_CONFIG_LOW | HOTMON_CONFIG_HIGH),
		(tcdm_base + PRCM_REQ_MB4_HOTMON_CONFIG));
	writeb(MB4H_HOTMON, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}

static int config_hot_period(u16 val)
{
	mutex_lock(&mb4_transfer.lock);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writew(val, (tcdm_base + PRCM_REQ_MB4_HOT_PERIOD));
	writeb(MB4H_HOT_PERIOD, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}

int prcmu_start_temp_sense(u16 cycles32k)
{
	if (cycles32k == 0xFFFF)
		return -EINVAL;

	return config_hot_period(cycles32k);
}

int prcmu_stop_temp_sense(void)
{
	return config_hot_period(0xFFFF);
}

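/*
 * Illustrative usage sketch (not part of the driver): a thermal driver
 * would program the HOTMON thresholds and then start periodic measurement.
 * The period is given in 32 kHz cycles, so 32768 corresponds to roughly one
 * second; the 75/85 degree thresholds are placeholders. Threshold crossings
 * are reported through the HOTMON_LOW/HOTMON_HIGH wakeups handled in
 * read_mailbox_0() below.
 */
#if 0
static int example_start_thermal_monitoring(void)
{
	int r;

	r = prcmu_config_hotmon(75, 85);	/* low/high thresholds */
	if (r)
		return r;

	return prcmu_start_temp_sense(32768);	/* ~1 s measurement period */
}
#endif
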
/**
 * prcmu_set_clock_divider() - Configure the clock divider.
 * @clock: The clock for which the request is made.
 * @divider: The clock divider. (< 32)
 *
 * This function should only be used by the clock implementation.
 * Do not use it from any other place!
 */
int prcmu_set_clock_divider(u8 clock, u8 divider)
{
	u32 val;
	unsigned long flags;

	if ((clock >= PRCMU_NUM_REG_CLOCKS) || (divider < 1) || (31 < divider))
		return -EINVAL;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
	val &= ~(PRCM_CLK_MGT_CLKPLLDIV_MASK);
	val |= (u32)divider;
	writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));

	/* Release the HW semaphore. */
	writel(0, (_PRCMU_BASE + PRCM_SEM));

	spin_unlock_irqrestore(&clk_mgt_lock, flags);

	return 0;
}

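/*
 * Illustrative usage sketch (not part of the driver): the platform clock
 * code adjusts a register-controlled clock by pairing the gate and divider
 * calls. PRCMU_LCDCLK is used only because PRCM_LCDCLK_MGT is listed in
 * clk_mgt[] above; the divider value 4 is a placeholder.
 */
#if 0
static int example_set_lcdclk_divider(void)
{
	int r;

	r = prcmu_request_clock(PRCMU_LCDCLK, true);
	if (r)
		return r;

	/* PLL output divided by 4 (valid range is 1..31). */
	return prcmu_set_clock_divider(PRCMU_LCDCLK, 4);
}
#endif
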
/**
 * prcmu_abb_read() - Read register value(s) from the ABB.
 * @slave: The I2C slave address.
 * @reg: The (start) register address.
 * @value: The read out value(s).
 * @size: The number of registers to read.
 *
 * Reads register value(s) from the ABB.
 * @size has to be 1 for the current firmware version.
 */
int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
{
	int r;

	if (size != 1)
		return -EINVAL;

	mutex_lock(&mb5_transfer.lock);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
		cpu_relax();

	writeb(PRCMU_I2C_READ(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
	writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
	writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
	writeb(0, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));

	writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));

	if (!wait_for_completion_timeout(&mb5_transfer.work,
			msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
	} else {
		r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO);
	}

	if (!r)
		*value = mb5_transfer.ack.value;

	mutex_unlock(&mb5_transfer.lock);

	return r;
}

/**
 * prcmu_abb_write() - Write register value(s) to the ABB.
 * @slave: The I2C slave address.
 * @reg: The (start) register address.
 * @value: The value(s) to write.
 * @size: The number of registers to write.
 *
 * Writes register value(s) to the ABB.
 * @size has to be 1 for the current firmware version.
 */
int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
{
	int r;

	if (size != 1)
		return -EINVAL;

	mutex_lock(&mb5_transfer.lock);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
		cpu_relax();

	writeb(PRCMU_I2C_WRITE(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
	writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
	writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
	writeb(*value, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));

	writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));

	if (!wait_for_completion_timeout(&mb5_transfer.work,
			msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
	} else {
		r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 0 : -EIO);
	}

	mutex_unlock(&mb5_transfer.lock);

	return r;
}

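/*
 * Illustrative usage sketch (not part of the driver): the AB8500 core
 * driver accesses its registers through this mailbox 5 I2C service. The
 * helper below shows a simple read-modify-write of one register; the
 * slave/register/mask parameters are whatever the caller needs, and only
 * single-byte transfers are possible with the current firmware.
 */
#if 0
static int example_abb_set_bits(u8 slave, u8 reg, u8 mask)
{
	u8 val;
	int r;

	r = prcmu_abb_read(slave, reg, &val, 1);
	if (r)
		return r;

	val |= mask;
	return prcmu_abb_write(slave, reg, &val, 1);
}
#endif
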
/**
 * prcmu_ac_wake_req - should be called whenever ARM wants to wake up the modem
 */
void prcmu_ac_wake_req(void)
{
	u32 val;

	mutex_lock(&mb0_transfer.ac_wake_lock);

	val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ);
	if (val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ)
		goto unlock_and_return;

	atomic_set(&ac_wake_req_state, 1);

	writel((val | PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ),
		(_PRCMU_BASE + PRCM_HOSTACCESS_REQ));

	if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
			msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
	}

unlock_and_return:
	mutex_unlock(&mb0_transfer.ac_wake_lock);
}

/**
 * prcmu_ac_sleep_req - called when ARM no longer needs to talk to the modem
 */
void prcmu_ac_sleep_req(void)
{
	u32 val;

	mutex_lock(&mb0_transfer.ac_wake_lock);

	val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ);
	if (!(val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ))
		goto unlock_and_return;

	writel((val & ~PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ),
		(_PRCMU_BASE + PRCM_HOSTACCESS_REQ));

	if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
			msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
	}

	atomic_set(&ac_wake_req_state, 0);

unlock_and_return:
	mutex_unlock(&mb0_transfer.ac_wake_lock);
}

bool prcmu_is_ac_wake_requested(void)
{
	return (atomic_read(&ac_wake_req_state) != 0);
}

/**
 * prcmu_system_reset - System reset
 *
 * Saves the reset reason code and then sets the APE_SOFTRST register, which
 * fires an interrupt to the firmware.
 */
void prcmu_system_reset(u16 reset_code)
{
	writew(reset_code, (tcdm_base + PRCM_SW_RST_REASON));
	writel(1, (_PRCMU_BASE + PRCM_APE_SOFTRST));
}

/**
 * prcmu_modem_reset - ask the PRCMU to reset the modem
 */
void prcmu_modem_reset(void)
{
	mutex_lock(&mb1_transfer.lock);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_RESET_MODEM, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
	wait_for_completion(&mb1_transfer.work);

	/*
	 * No need to check the response from the PRCMU: the modem should go
	 * into its reset state, and that state is already managed by the
	 * upper layer.
	 */

	mutex_unlock(&mb1_transfer.lock);
}

static void ack_dbb_wakeup(void)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
		cpu_relax();

	writeb(MB0H_READ_WAKEUP_ACK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
	writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}

static inline void print_unknown_header_warning(u8 n, u8 header)
{
	pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n",
		header, n);
}

static bool read_mailbox_0(void)
{
	bool r;
	u32 ev;
	unsigned int n;
	u8 header;

	header = readb(tcdm_base + PRCM_MBOX_HEADER_ACK_MB0);
	switch (header) {
	case MB0H_WAKEUP_EXE:
	case MB0H_WAKEUP_SLEEP:
		if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
			ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_1_8500);
		else
			ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_0_8500);

		if (ev & (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK))
			complete(&mb0_transfer.ac_wake_work);
		if (ev & WAKEUP_BIT_SYSCLK_OK)
			complete(&mb3_transfer.sysclk_work);

		ev &= mb0_transfer.req.dbb_irqs;

		for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
			if (ev & prcmu_irq_bit[n])
				generic_handle_irq(IRQ_PRCMU_BASE + n);
		}
		r = true;
		break;
	default:
		print_unknown_header_warning(0, header);
		r = false;
		break;
	}
	writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
	return r;
}

static bool read_mailbox_1(void)
{
	mb1_transfer.ack.header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1);
	mb1_transfer.ack.arm_opp = readb(tcdm_base +
		PRCM_ACK_MB1_CURRENT_ARM_OPP);
	mb1_transfer.ack.ape_opp = readb(tcdm_base +
		PRCM_ACK_MB1_CURRENT_APE_OPP);
	mb1_transfer.ack.ape_voltage_status = readb(tcdm_base +
		PRCM_ACK_MB1_APE_VOLTAGE_STATUS);
	writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
	complete(&mb1_transfer.work);
	return false;
}

static bool read_mailbox_2(void)
{
	mb2_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB2_DPS_STATUS);
	writel(MBOX_BIT(2), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
	complete(&mb2_transfer.work);
	return false;
}

static bool read_mailbox_3(void)
{
	writel(MBOX_BIT(3), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
	return false;
}

static bool read_mailbox_4(void)
{
	u8 header;
	bool do_complete = true;

	header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB4);
	switch (header) {
	case MB4H_MEM_ST:
	case MB4H_HOTDOG:
	case MB4H_HOTMON:
	case MB4H_HOT_PERIOD:
		break;
	default:
		print_unknown_header_warning(4, header);
		do_complete = false;
		break;
	}

	writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));

	if (do_complete)
		complete(&mb4_transfer.work);

	return false;
}

static bool read_mailbox_5(void)
{
	mb5_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB5_I2C_STATUS);
	mb5_transfer.ack.value = readb(tcdm_base + PRCM_ACK_MB5_I2C_VAL);
	writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
	complete(&mb5_transfer.work);
	return false;
}

static bool read_mailbox_6(void)
{
	writel(MBOX_BIT(6), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
	return false;
}

static bool read_mailbox_7(void)
{
	writel(MBOX_BIT(7), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
	return false;
}

static bool (* const read_mailbox[NUM_MB])(void) = {
	read_mailbox_0,
	read_mailbox_1,
	read_mailbox_2,
	read_mailbox_3,
	read_mailbox_4,
	read_mailbox_5,
	read_mailbox_6,
	read_mailbox_7
};

static irqreturn_t prcmu_irq_handler(int irq, void *data)
{
	u32 bits;
	u8 n;
	irqreturn_t r;

	bits = (readl(_PRCMU_BASE + PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
	if (unlikely(!bits))
		return IRQ_NONE;

	r = IRQ_HANDLED;
	for (n = 0; bits; n++) {
		if (bits & MBOX_BIT(n)) {
			bits -= MBOX_BIT(n);
			if (read_mailbox[n]())
				r = IRQ_WAKE_THREAD;
		}
	}
	return r;
}

static irqreturn_t prcmu_irq_thread_fn(int irq, void *data)
{
	ack_dbb_wakeup();
	return IRQ_HANDLED;
}

static void prcmu_mask_work(struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	config_wakeups();

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}

static void prcmu_irq_mask(struct irq_data *d)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);

	mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->irq - IRQ_PRCMU_BASE];

	spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);

	if (d->irq != IRQ_PRCMU_CA_SLEEP)
		schedule_work(&mb0_transfer.mask_work);
}

static void prcmu_irq_unmask(struct irq_data *d)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);

	mb0_transfer.req.dbb_irqs |= prcmu_irq_bit[d->irq - IRQ_PRCMU_BASE];

	spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);

	if (d->irq != IRQ_PRCMU_CA_SLEEP)
		schedule_work(&mb0_transfer.mask_work);
}

static void noop(struct irq_data *d)
{
}

static struct irq_chip prcmu_irq_chip = {
	.name = "prcmu",
	.irq_disable = prcmu_irq_mask,
	.irq_ack = noop,
	.irq_mask = prcmu_irq_mask,
	.irq_unmask = prcmu_irq_unmask,
};

void __init prcmu_early_init(void)
{
	unsigned int i;

	if (cpu_is_u8500v1()) {
		tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE_V1);
	} else if (cpu_is_u8500v2()) {
		void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);

		if (tcpm_base != NULL) {
			int version;
			version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
			prcmu_version.project_number = version & 0xFF;
			prcmu_version.api_version = (version >> 8) & 0xFF;
			prcmu_version.func_version = (version >> 16) & 0xFF;
			prcmu_version.errata = (version >> 24) & 0xFF;
			pr_info("PRCMU firmware version %d.%d.%d\n",
				(version >> 8) & 0xFF, (version >> 16) & 0xFF,
				(version >> 24) & 0xFF);
			iounmap(tcpm_base);
		}

		tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
	} else {
		pr_err("prcmu: Unsupported chip version\n");
		BUG();
	}

	spin_lock_init(&mb0_transfer.lock);
	spin_lock_init(&mb0_transfer.dbb_irqs_lock);
	mutex_init(&mb0_transfer.ac_wake_lock);
	init_completion(&mb0_transfer.ac_wake_work);
	mutex_init(&mb1_transfer.lock);
	init_completion(&mb1_transfer.work);
	mutex_init(&mb2_transfer.lock);
	init_completion(&mb2_transfer.work);
	spin_lock_init(&mb2_transfer.auto_pm_lock);
	spin_lock_init(&mb3_transfer.lock);
	mutex_init(&mb3_transfer.sysclk_lock);
	init_completion(&mb3_transfer.sysclk_work);
	mutex_init(&mb4_transfer.lock);
	init_completion(&mb4_transfer.work);
	mutex_init(&mb5_transfer.lock);
	init_completion(&mb5_transfer.work);

	INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);

	/* Initialize irqs. */
	for (i = 0; i < NUM_PRCMU_WAKEUPS; i++) {
		unsigned int irq;

		irq = IRQ_PRCMU_BASE + i;
		irq_set_chip_and_handler(irq, &prcmu_irq_chip,
			handle_simple_irq);
		set_irq_flags(irq, IRQF_VALID);
	}
}

/*
 * Power domain switches (ePODs) modeled as regulators for the DB8500 SoC
 */
static struct regulator_consumer_supply db8500_vape_consumers[] = {
	REGULATOR_SUPPLY("v-ape", NULL),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.0"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
	/* "v-mmc" changed to "vcore" in the mainline kernel */
	REGULATOR_SUPPLY("vcore", "sdi0"),
	REGULATOR_SUPPLY("vcore", "sdi1"),
	REGULATOR_SUPPLY("vcore", "sdi2"),
	REGULATOR_SUPPLY("vcore", "sdi3"),
	REGULATOR_SUPPLY("vcore", "sdi4"),
	REGULATOR_SUPPLY("v-dma", "dma40.0"),
	REGULATOR_SUPPLY("v-ape", "ab8500-usb.0"),
	/* "v-uart" changed to "vcore" in the mainline kernel */
	REGULATOR_SUPPLY("vcore", "uart0"),
	REGULATOR_SUPPLY("vcore", "uart1"),
	REGULATOR_SUPPLY("vcore", "uart2"),
	REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
};

static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
	/* CG2900 and CW1200 power to off-chip peripherals */
	REGULATOR_SUPPLY("gbf_1v8", "cg2900-uart.0"),
	REGULATOR_SUPPLY("wlan_1v8", "cw1200.0"),
	REGULATOR_SUPPLY("musb_1v8", "ab8500-usb.0"),
	/* AV8100 regulator */
	REGULATOR_SUPPLY("hdmi_1v8", "0-0070"),
};

static struct regulator_consumer_supply db8500_b2r2_mcde_consumers[] = {
	REGULATOR_SUPPLY("vsupply", "b2r2.0"),
	REGULATOR_SUPPLY("vsupply", "mcde.0"),
};

static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
	[DB8500_REGULATOR_VAPE] = {
		.constraints = {
			.name = "db8500-vape",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_vape_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_vape_consumers),
	},
	[DB8500_REGULATOR_VARM] = {
		.constraints = {
			.name = "db8500-varm",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VMODEM] = {
		.constraints = {
			.name = "db8500-vmodem",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VPLL] = {
		.constraints = {
			.name = "db8500-vpll",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VSMPS1] = {
		.constraints = {
			.name = "db8500-vsmps1",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VSMPS2] = {
		.constraints = {
			.name = "db8500-vsmps2",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_vsmps2_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_vsmps2_consumers),
	},
	[DB8500_REGULATOR_VSMPS3] = {
		.constraints = {
			.name = "db8500-vsmps3",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VRF1] = {
		.constraints = {
			.name = "db8500-vrf1",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SVAMMDSP] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-sva-mmdsp",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = {
		.constraints = {
			/* "ret" means "retention" */
			.name = "db8500-sva-mmdsp-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SVAPIPE] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-sva-pipe",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-sia-mmdsp",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = {
		.constraints = {
			.name = "db8500-sia-mmdsp-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SIAPIPE] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-sia-pipe",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SGA] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-sga",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_B2R2_MCDE] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-b2r2-mcde",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_b2r2_mcde_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_b2r2_mcde_consumers),
	},
	[DB8500_REGULATOR_SWITCH_ESRAM12] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-esram12",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_ESRAM12RET] = {
		.constraints = {
			.name = "db8500-esram12-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_ESRAM34] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-esram34",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_ESRAM34RET] = {
		.constraints = {
			.name = "db8500-esram34-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
};

static struct mfd_cell db8500_prcmu_devs[] = {
	{
		.name = "db8500-prcmu-regulators",
		.platform_data = &db8500_regulators,
		.pdata_size = sizeof(db8500_regulators),
	},
	{
		.name = "cpufreq-u8500",
	},
};

/**
 * db8500_prcmu_probe - probe function for the Linux PRCMU fw init logic
 */
static int __init db8500_prcmu_probe(struct platform_device *pdev)
{
	int err = 0;

	if (ux500_is_svp())
		return -ENODEV;

	/* Clean up the mailbox interrupts after pre-kernel code. */
	writel(ALL_MBOX_BITS, (_PRCMU_BASE + PRCM_ARM_IT1_CLR));

	err = request_threaded_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler,
		prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
	if (err < 0) {
		pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n");
		err = -EBUSY;
		goto no_irq_return;
	}

	if (cpu_is_u8500v20_or_later())
		prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);

	err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
			ARRAY_SIZE(db8500_prcmu_devs), NULL, 0);

	if (err)
		pr_err("prcmu: Failed to add subdevices\n");
	else
		pr_info("DB8500 PRCMU initialized\n");

no_irq_return:
	return err;
}

static struct platform_driver db8500_prcmu_driver = {
	.driver = {
		.name = "db8500-prcmu",
		.owner = THIS_MODULE,
	},
};

static int __init db8500_prcmu_init(void)
{
	return platform_driver_probe(&db8500_prcmu_driver, db8500_prcmu_probe);
}

arch_initcall(db8500_prcmu_init);

MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com>");
MODULE_DESCRIPTION("DB8500 PRCM Unit driver");
MODULE_LICENSE("GPL v2");