/*
 * Qualcomm Peripheral Image Loader
 *
 * Copyright (C) 2016 Linaro Ltd.
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

#include "remoteproc_internal.h"
#include "qcom_common.h"

#include <linux/qcom_scm.h>
/* SMEM item holding the modem subsystem crash-reason string */
#define MPSS_CRASH_REASON_SMEM		421

/* RMB Status Register Values */
#define RMB_PBL_SUCCESS			0x1

#define RMB_MBA_XPU_UNLOCKED		0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
#define RMB_MBA_AUTH_COMPLETE		0x4

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE_REG		0x00
#define RMB_PBL_STATUS_REG		0x04
#define RMB_MBA_COMMAND_REG		0x08
#define RMB_MBA_STATUS_REG		0x0C
#define RMB_PMI_META_DATA_REG		0x10
#define RMB_PMI_CODE_START_REG		0x14
#define RMB_PMI_CODE_LENGTH_REG		0x18

#define RMB_CMD_META_DATA_READY		0x1
#define RMB_CMD_LOAD_READY		0x2

/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8

#define HALT_ACK_TIMEOUT_MS		100

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)
96 struct regulator
*reg
;
101 struct qcom_mss_reg_res
{
107 struct rproc_hexagon_res
{
108 const char *hexagon_mba_image
;
109 struct qcom_mss_reg_res
*proxy_supply
;
110 struct qcom_mss_reg_res
*active_supply
;
111 char **proxy_clk_names
;
112 char **active_clk_names
;
119 void __iomem
*reg_base
;
120 void __iomem
*rmb_base
;
122 struct regmap
*halt_map
;
127 struct reset_control
*mss_restart
;
129 struct qcom_smem_state
*state
;
132 struct clk
*active_clks
[8];
133 struct clk
*proxy_clks
[4];
134 int active_clk_count
;
137 struct reg_info active_regs
[1];
138 struct reg_info proxy_regs
[3];
139 int active_reg_count
;
142 struct completion start_done
;
143 struct completion stop_done
;
146 phys_addr_t mba_phys
;
150 phys_addr_t mpss_phys
;
151 phys_addr_t mpss_reloc
;
155 struct qcom_rproc_subdev smd_subdev
;
158 static int q6v5_regulator_init(struct device
*dev
, struct reg_info
*regs
,
159 const struct qcom_mss_reg_res
*reg_res
)
167 for (i
= 0; reg_res
[i
].supply
; i
++) {
168 regs
[i
].reg
= devm_regulator_get(dev
, reg_res
[i
].supply
);
169 if (IS_ERR(regs
[i
].reg
)) {
170 rc
= PTR_ERR(regs
[i
].reg
);
171 if (rc
!= -EPROBE_DEFER
)
172 dev_err(dev
, "Failed to get %s\n regulator",
177 regs
[i
].uV
= reg_res
[i
].uV
;
178 regs
[i
].uA
= reg_res
[i
].uA
;
184 static int q6v5_regulator_enable(struct q6v5
*qproc
,
185 struct reg_info
*regs
, int count
)
190 for (i
= 0; i
< count
; i
++) {
191 if (regs
[i
].uV
> 0) {
192 ret
= regulator_set_voltage(regs
[i
].reg
,
193 regs
[i
].uV
, INT_MAX
);
196 "Failed to request voltage for %d.\n",
202 if (regs
[i
].uA
> 0) {
203 ret
= regulator_set_load(regs
[i
].reg
,
207 "Failed to set regulator mode\n");
212 ret
= regulator_enable(regs
[i
].reg
);
214 dev_err(qproc
->dev
, "Regulator enable failed\n");
221 for (; i
>= 0; i
--) {
223 regulator_set_voltage(regs
[i
].reg
, 0, INT_MAX
);
226 regulator_set_load(regs
[i
].reg
, 0);
228 regulator_disable(regs
[i
].reg
);
234 static void q6v5_regulator_disable(struct q6v5
*qproc
,
235 struct reg_info
*regs
, int count
)
239 for (i
= 0; i
< count
; i
++) {
241 regulator_set_voltage(regs
[i
].reg
, 0, INT_MAX
);
244 regulator_set_load(regs
[i
].reg
, 0);
246 regulator_disable(regs
[i
].reg
);
/*
 * Prepare+enable @count clocks; on failure disable the ones already
 * enabled and return the error.
 */
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc;
	int i;

	for (i = 0; i < count; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (i--; i >= 0; i--)
		clk_disable_unprepare(clks[i]);

	return rc;
}
/* Disable+unprepare @count clocks previously enabled by q6v5_clk_enable(). */
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int i;

	for (i = 0; i < count; i++)
		clk_disable_unprepare(clks[i]);
}
281 static struct resource_table
*q6v5_find_rsc_table(struct rproc
*rproc
,
282 const struct firmware
*fw
,
285 static struct resource_table table
= { .ver
= 1, };
287 *tablesz
= sizeof(table
);
291 static int q6v5_load(struct rproc
*rproc
, const struct firmware
*fw
)
293 struct q6v5
*qproc
= rproc
->priv
;
295 memcpy(qproc
->mba_region
, fw
->data
, fw
->size
);
300 static const struct rproc_fw_ops q6v5_fw_ops
= {
301 .find_rsc_table
= q6v5_find_rsc_table
,
305 static int q6v5_rmb_pbl_wait(struct q6v5
*qproc
, int ms
)
307 unsigned long timeout
;
310 timeout
= jiffies
+ msecs_to_jiffies(ms
);
312 val
= readl(qproc
->rmb_base
+ RMB_PBL_STATUS_REG
);
316 if (time_after(jiffies
, timeout
))
325 static int q6v5_rmb_mba_wait(struct q6v5
*qproc
, u32 status
, int ms
)
328 unsigned long timeout
;
331 timeout
= jiffies
+ msecs_to_jiffies(ms
);
333 val
= readl(qproc
->rmb_base
+ RMB_MBA_STATUS_REG
);
339 else if (status
&& val
== status
)
342 if (time_after(jiffies
, timeout
))
351 static int q6v5proc_reset(struct q6v5
*qproc
)
356 /* Assert resets, stop core */
357 val
= readl(qproc
->reg_base
+ QDSP6SS_RESET_REG
);
358 val
|= (Q6SS_CORE_ARES
| Q6SS_BUS_ARES_ENABLE
| Q6SS_STOP_CORE
);
359 writel(val
, qproc
->reg_base
+ QDSP6SS_RESET_REG
);
361 /* Enable power block headswitch, and wait for it to stabilize */
362 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
363 val
|= QDSS_BHS_ON
| QDSS_LDO_BYP
;
364 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
368 * Turn on memories. L2 banks should be done individually
369 * to minimize inrush current.
371 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
372 val
|= Q6SS_SLP_RET_N
| Q6SS_L2TAG_SLP_NRET_N
|
373 Q6SS_ETB_SLP_NRET_N
| Q6SS_L2DATA_STBY_N
;
374 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
375 val
|= Q6SS_L2DATA_SLP_NRET_N_2
;
376 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
377 val
|= Q6SS_L2DATA_SLP_NRET_N_1
;
378 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
379 val
|= Q6SS_L2DATA_SLP_NRET_N_0
;
380 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
382 /* Remove IO clamp */
383 val
&= ~Q6SS_CLAMP_IO
;
384 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
386 /* Bring core out of reset */
387 val
= readl(qproc
->reg_base
+ QDSP6SS_RESET_REG
);
388 val
&= ~Q6SS_CORE_ARES
;
389 writel(val
, qproc
->reg_base
+ QDSP6SS_RESET_REG
);
391 /* Turn on core clock */
392 val
= readl(qproc
->reg_base
+ QDSP6SS_GFMUX_CTL_REG
);
393 val
|= Q6SS_CLK_ENABLE
;
394 writel(val
, qproc
->reg_base
+ QDSP6SS_GFMUX_CTL_REG
);
396 /* Start core execution */
397 val
= readl(qproc
->reg_base
+ QDSP6SS_RESET_REG
);
398 val
&= ~Q6SS_STOP_CORE
;
399 writel(val
, qproc
->reg_base
+ QDSP6SS_RESET_REG
);
401 /* Wait for PBL status */
402 ret
= q6v5_rmb_pbl_wait(qproc
, 1000);
403 if (ret
== -ETIMEDOUT
) {
404 dev_err(qproc
->dev
, "PBL boot timed out\n");
405 } else if (ret
!= RMB_PBL_SUCCESS
) {
406 dev_err(qproc
->dev
, "PBL returned unexpected status %d\n", ret
);
415 static void q6v5proc_halt_axi_port(struct q6v5
*qproc
,
416 struct regmap
*halt_map
,
419 unsigned long timeout
;
423 /* Check if we're already idle */
424 ret
= regmap_read(halt_map
, offset
+ AXI_IDLE_REG
, &val
);
428 /* Assert halt request */
429 regmap_write(halt_map
, offset
+ AXI_HALTREQ_REG
, 1);
432 timeout
= jiffies
+ msecs_to_jiffies(HALT_ACK_TIMEOUT_MS
);
434 ret
= regmap_read(halt_map
, offset
+ AXI_HALTACK_REG
, &val
);
435 if (ret
|| val
|| time_after(jiffies
, timeout
))
441 ret
= regmap_read(halt_map
, offset
+ AXI_IDLE_REG
, &val
);
443 dev_err(qproc
->dev
, "port failed halt\n");
445 /* Clear halt request (port will remain halted until reset) */
446 regmap_write(halt_map
, offset
+ AXI_HALTREQ_REG
, 0);
449 static int q6v5_mpss_init_image(struct q6v5
*qproc
, const struct firmware
*fw
)
451 unsigned long dma_attrs
= DMA_ATTR_FORCE_CONTIGUOUS
;
456 ptr
= dma_alloc_attrs(qproc
->dev
, fw
->size
, &phys
, GFP_KERNEL
, dma_attrs
);
458 dev_err(qproc
->dev
, "failed to allocate mdt buffer\n");
462 memcpy(ptr
, fw
->data
, fw
->size
);
464 writel(phys
, qproc
->rmb_base
+ RMB_PMI_META_DATA_REG
);
465 writel(RMB_CMD_META_DATA_READY
, qproc
->rmb_base
+ RMB_MBA_COMMAND_REG
);
467 ret
= q6v5_rmb_mba_wait(qproc
, RMB_MBA_META_DATA_AUTH_SUCCESS
, 1000);
468 if (ret
== -ETIMEDOUT
)
469 dev_err(qproc
->dev
, "MPSS header authentication timed out\n");
471 dev_err(qproc
->dev
, "MPSS header authentication failed: %d\n", ret
);
473 dma_free_attrs(qproc
->dev
, fw
->size
, ptr
, phys
, dma_attrs
);
475 return ret
< 0 ? ret
: 0;
478 static bool q6v5_phdr_valid(const struct elf32_phdr
*phdr
)
480 if (phdr
->p_type
!= PT_LOAD
)
483 if ((phdr
->p_flags
& QCOM_MDT_TYPE_MASK
) == QCOM_MDT_TYPE_HASH
)
492 static int q6v5_mpss_load(struct q6v5
*qproc
)
494 const struct elf32_phdr
*phdrs
;
495 const struct elf32_phdr
*phdr
;
496 const struct firmware
*seg_fw
;
497 const struct firmware
*fw
;
498 struct elf32_hdr
*ehdr
;
499 phys_addr_t mpss_reloc
;
500 phys_addr_t boot_addr
;
501 phys_addr_t min_addr
= (phys_addr_t
)ULLONG_MAX
;
502 phys_addr_t max_addr
= 0;
503 bool relocate
= false;
511 ret
= request_firmware(&fw
, "modem.mdt", qproc
->dev
);
513 dev_err(qproc
->dev
, "unable to load modem.mdt\n");
517 /* Initialize the RMB validator */
518 writel(0, qproc
->rmb_base
+ RMB_PMI_CODE_LENGTH_REG
);
520 ret
= q6v5_mpss_init_image(qproc
, fw
);
522 goto release_firmware
;
524 ehdr
= (struct elf32_hdr
*)fw
->data
;
525 phdrs
= (struct elf32_phdr
*)(ehdr
+ 1);
527 for (i
= 0; i
< ehdr
->e_phnum
; i
++) {
530 if (!q6v5_phdr_valid(phdr
))
533 if (phdr
->p_flags
& QCOM_MDT_RELOCATABLE
)
536 if (phdr
->p_paddr
< min_addr
)
537 min_addr
= phdr
->p_paddr
;
539 if (phdr
->p_paddr
+ phdr
->p_memsz
> max_addr
)
540 max_addr
= ALIGN(phdr
->p_paddr
+ phdr
->p_memsz
, SZ_4K
);
543 mpss_reloc
= relocate
? min_addr
: qproc
->mpss_phys
;
545 for (i
= 0; i
< ehdr
->e_phnum
; i
++) {
548 if (!q6v5_phdr_valid(phdr
))
551 offset
= phdr
->p_paddr
- mpss_reloc
;
552 if (offset
< 0 || offset
+ phdr
->p_memsz
> qproc
->mpss_size
) {
553 dev_err(qproc
->dev
, "segment outside memory range\n");
555 goto release_firmware
;
558 ptr
= qproc
->mpss_region
+ offset
;
560 if (phdr
->p_filesz
) {
561 snprintf(seg_name
, sizeof(seg_name
), "modem.b%02d", i
);
562 ret
= request_firmware(&seg_fw
, seg_name
, qproc
->dev
);
564 dev_err(qproc
->dev
, "failed to load %s\n", seg_name
);
565 goto release_firmware
;
568 memcpy(ptr
, seg_fw
->data
, seg_fw
->size
);
570 release_firmware(seg_fw
);
573 if (phdr
->p_memsz
> phdr
->p_filesz
) {
574 memset(ptr
+ phdr
->p_filesz
, 0,
575 phdr
->p_memsz
- phdr
->p_filesz
);
578 size
= readl(qproc
->rmb_base
+ RMB_PMI_CODE_LENGTH_REG
);
580 boot_addr
= relocate
? qproc
->mpss_phys
: min_addr
;
581 writel(boot_addr
, qproc
->rmb_base
+ RMB_PMI_CODE_START_REG
);
582 writel(RMB_CMD_LOAD_READY
, qproc
->rmb_base
+ RMB_MBA_COMMAND_REG
);
585 size
+= phdr
->p_memsz
;
586 writel(size
, qproc
->rmb_base
+ RMB_PMI_CODE_LENGTH_REG
);
589 ret
= q6v5_rmb_mba_wait(qproc
, RMB_MBA_AUTH_COMPLETE
, 10000);
590 if (ret
== -ETIMEDOUT
)
591 dev_err(qproc
->dev
, "MPSS authentication timed out\n");
593 dev_err(qproc
->dev
, "MPSS authentication failed: %d\n", ret
);
596 release_firmware(fw
);
598 return ret
< 0 ? ret
: 0;
601 static int q6v5_start(struct rproc
*rproc
)
603 struct q6v5
*qproc
= (struct q6v5
*)rproc
->priv
;
606 ret
= q6v5_regulator_enable(qproc
, qproc
->proxy_regs
,
607 qproc
->proxy_reg_count
);
609 dev_err(qproc
->dev
, "failed to enable proxy supplies\n");
613 ret
= q6v5_clk_enable(qproc
->dev
, qproc
->proxy_clks
,
614 qproc
->proxy_clk_count
);
616 dev_err(qproc
->dev
, "failed to enable proxy clocks\n");
617 goto disable_proxy_reg
;
620 ret
= q6v5_regulator_enable(qproc
, qproc
->active_regs
,
621 qproc
->active_reg_count
);
623 dev_err(qproc
->dev
, "failed to enable supplies\n");
624 goto disable_proxy_clk
;
626 ret
= reset_control_deassert(qproc
->mss_restart
);
628 dev_err(qproc
->dev
, "failed to deassert mss restart\n");
632 ret
= q6v5_clk_enable(qproc
->dev
, qproc
->active_clks
,
633 qproc
->active_clk_count
);
635 dev_err(qproc
->dev
, "failed to enable clocks\n");
639 writel(qproc
->mba_phys
, qproc
->rmb_base
+ RMB_MBA_IMAGE_REG
);
641 ret
= q6v5proc_reset(qproc
);
645 ret
= q6v5_rmb_mba_wait(qproc
, 0, 5000);
646 if (ret
== -ETIMEDOUT
) {
647 dev_err(qproc
->dev
, "MBA boot timed out\n");
649 } else if (ret
!= RMB_MBA_XPU_UNLOCKED
&&
650 ret
!= RMB_MBA_XPU_UNLOCKED_SCRIBBLED
) {
651 dev_err(qproc
->dev
, "MBA returned unexpected status %d\n", ret
);
656 dev_info(qproc
->dev
, "MBA booted, loading mpss\n");
658 ret
= q6v5_mpss_load(qproc
);
662 ret
= wait_for_completion_timeout(&qproc
->start_done
,
663 msecs_to_jiffies(5000));
665 dev_err(qproc
->dev
, "start timed out\n");
670 qproc
->running
= true;
672 q6v5_clk_disable(qproc
->dev
, qproc
->proxy_clks
,
673 qproc
->proxy_clk_count
);
674 q6v5_regulator_disable(qproc
, qproc
->proxy_regs
,
675 qproc
->proxy_reg_count
);
680 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_q6
);
681 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_modem
);
682 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_nc
);
683 q6v5_clk_disable(qproc
->dev
, qproc
->active_clks
,
684 qproc
->active_clk_count
);
686 reset_control_assert(qproc
->mss_restart
);
688 q6v5_regulator_disable(qproc
, qproc
->active_regs
,
689 qproc
->active_reg_count
);
691 q6v5_clk_disable(qproc
->dev
, qproc
->proxy_clks
,
692 qproc
->proxy_clk_count
);
694 q6v5_regulator_disable(qproc
, qproc
->proxy_regs
,
695 qproc
->proxy_reg_count
);
700 static int q6v5_stop(struct rproc
*rproc
)
702 struct q6v5
*qproc
= (struct q6v5
*)rproc
->priv
;
705 qproc
->running
= false;
707 qcom_smem_state_update_bits(qproc
->state
,
708 BIT(qproc
->stop_bit
), BIT(qproc
->stop_bit
));
710 ret
= wait_for_completion_timeout(&qproc
->stop_done
,
711 msecs_to_jiffies(5000));
713 dev_err(qproc
->dev
, "timed out on wait\n");
715 qcom_smem_state_update_bits(qproc
->state
, BIT(qproc
->stop_bit
), 0);
717 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_q6
);
718 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_modem
);
719 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_nc
);
721 reset_control_assert(qproc
->mss_restart
);
722 q6v5_clk_disable(qproc
->dev
, qproc
->active_clks
,
723 qproc
->active_clk_count
);
724 q6v5_regulator_disable(qproc
, qproc
->active_regs
,
725 qproc
->active_reg_count
);
730 static void *q6v5_da_to_va(struct rproc
*rproc
, u64 da
, int len
)
732 struct q6v5
*qproc
= rproc
->priv
;
735 offset
= da
- qproc
->mpss_reloc
;
736 if (offset
< 0 || offset
+ len
> qproc
->mpss_size
)
739 return qproc
->mpss_region
+ offset
;
742 static const struct rproc_ops q6v5_ops
= {
745 .da_to_va
= q6v5_da_to_va
,
748 static irqreturn_t
q6v5_wdog_interrupt(int irq
, void *dev
)
750 struct q6v5
*qproc
= dev
;
754 /* Sometimes the stop triggers a watchdog rather than a stop-ack */
755 if (!qproc
->running
) {
756 complete(&qproc
->stop_done
);
760 msg
= qcom_smem_get(QCOM_SMEM_HOST_ANY
, MPSS_CRASH_REASON_SMEM
, &len
);
761 if (!IS_ERR(msg
) && len
> 0 && msg
[0])
762 dev_err(qproc
->dev
, "watchdog received: %s\n", msg
);
764 dev_err(qproc
->dev
, "watchdog without message\n");
766 rproc_report_crash(qproc
->rproc
, RPROC_WATCHDOG
);
774 static irqreturn_t
q6v5_fatal_interrupt(int irq
, void *dev
)
776 struct q6v5
*qproc
= dev
;
780 msg
= qcom_smem_get(QCOM_SMEM_HOST_ANY
, MPSS_CRASH_REASON_SMEM
, &len
);
781 if (!IS_ERR(msg
) && len
> 0 && msg
[0])
782 dev_err(qproc
->dev
, "fatal error received: %s\n", msg
);
784 dev_err(qproc
->dev
, "fatal error without message\n");
786 rproc_report_crash(qproc
->rproc
, RPROC_FATAL_ERROR
);
794 static irqreturn_t
q6v5_handover_interrupt(int irq
, void *dev
)
796 struct q6v5
*qproc
= dev
;
798 complete(&qproc
->start_done
);
802 static irqreturn_t
q6v5_stop_ack_interrupt(int irq
, void *dev
)
804 struct q6v5
*qproc
= dev
;
806 complete(&qproc
->stop_done
);
810 static int q6v5_init_mem(struct q6v5
*qproc
, struct platform_device
*pdev
)
812 struct of_phandle_args args
;
813 struct resource
*res
;
816 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "qdsp6");
817 qproc
->reg_base
= devm_ioremap_resource(&pdev
->dev
, res
);
818 if (IS_ERR(qproc
->reg_base
))
819 return PTR_ERR(qproc
->reg_base
);
821 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "rmb");
822 qproc
->rmb_base
= devm_ioremap_resource(&pdev
->dev
, res
);
823 if (IS_ERR(qproc
->rmb_base
))
824 return PTR_ERR(qproc
->rmb_base
);
826 ret
= of_parse_phandle_with_fixed_args(pdev
->dev
.of_node
,
827 "qcom,halt-regs", 3, 0, &args
);
829 dev_err(&pdev
->dev
, "failed to parse qcom,halt-regs\n");
833 qproc
->halt_map
= syscon_node_to_regmap(args
.np
);
834 of_node_put(args
.np
);
835 if (IS_ERR(qproc
->halt_map
))
836 return PTR_ERR(qproc
->halt_map
);
838 qproc
->halt_q6
= args
.args
[0];
839 qproc
->halt_modem
= args
.args
[1];
840 qproc
->halt_nc
= args
.args
[2];
845 static int q6v5_init_clocks(struct device
*dev
, struct clk
**clks
,
853 for (i
= 0; clk_names
[i
]; i
++) {
854 clks
[i
] = devm_clk_get(dev
, clk_names
[i
]);
855 if (IS_ERR(clks
[i
])) {
856 int rc
= PTR_ERR(clks
[i
]);
858 if (rc
!= -EPROBE_DEFER
)
859 dev_err(dev
, "Failed to get %s clock\n",
868 static int q6v5_init_reset(struct q6v5
*qproc
)
870 qproc
->mss_restart
= devm_reset_control_get(qproc
->dev
, NULL
);
871 if (IS_ERR(qproc
->mss_restart
)) {
872 dev_err(qproc
->dev
, "failed to acquire mss restart\n");
873 return PTR_ERR(qproc
->mss_restart
);
879 static int q6v5_request_irq(struct q6v5
*qproc
,
880 struct platform_device
*pdev
,
882 irq_handler_t thread_fn
)
886 ret
= platform_get_irq_byname(pdev
, name
);
888 dev_err(&pdev
->dev
, "no %s IRQ defined\n", name
);
892 ret
= devm_request_threaded_irq(&pdev
->dev
, ret
,
894 IRQF_TRIGGER_RISING
| IRQF_ONESHOT
,
897 dev_err(&pdev
->dev
, "request %s IRQ failed\n", name
);
902 static int q6v5_alloc_memory_region(struct q6v5
*qproc
)
904 struct device_node
*child
;
905 struct device_node
*node
;
909 child
= of_get_child_by_name(qproc
->dev
->of_node
, "mba");
910 node
= of_parse_phandle(child
, "memory-region", 0);
911 ret
= of_address_to_resource(node
, 0, &r
);
913 dev_err(qproc
->dev
, "unable to resolve mba region\n");
917 qproc
->mba_phys
= r
.start
;
918 qproc
->mba_size
= resource_size(&r
);
919 qproc
->mba_region
= devm_ioremap_wc(qproc
->dev
, qproc
->mba_phys
, qproc
->mba_size
);
920 if (!qproc
->mba_region
) {
921 dev_err(qproc
->dev
, "unable to map memory region: %pa+%zx\n",
922 &r
.start
, qproc
->mba_size
);
926 child
= of_get_child_by_name(qproc
->dev
->of_node
, "mpss");
927 node
= of_parse_phandle(child
, "memory-region", 0);
928 ret
= of_address_to_resource(node
, 0, &r
);
930 dev_err(qproc
->dev
, "unable to resolve mpss region\n");
934 qproc
->mpss_phys
= qproc
->mpss_reloc
= r
.start
;
935 qproc
->mpss_size
= resource_size(&r
);
936 qproc
->mpss_region
= devm_ioremap_wc(qproc
->dev
, qproc
->mpss_phys
, qproc
->mpss_size
);
937 if (!qproc
->mpss_region
) {
938 dev_err(qproc
->dev
, "unable to map memory region: %pa+%zx\n",
939 &r
.start
, qproc
->mpss_size
);
946 static int q6v5_probe(struct platform_device
*pdev
)
948 const struct rproc_hexagon_res
*desc
;
953 desc
= of_device_get_match_data(&pdev
->dev
);
957 rproc
= rproc_alloc(&pdev
->dev
, pdev
->name
, &q6v5_ops
,
958 desc
->hexagon_mba_image
, sizeof(*qproc
));
960 dev_err(&pdev
->dev
, "failed to allocate rproc\n");
964 rproc
->fw_ops
= &q6v5_fw_ops
;
966 qproc
= (struct q6v5
*)rproc
->priv
;
967 qproc
->dev
= &pdev
->dev
;
968 qproc
->rproc
= rproc
;
969 platform_set_drvdata(pdev
, qproc
);
971 init_completion(&qproc
->start_done
);
972 init_completion(&qproc
->stop_done
);
974 ret
= q6v5_init_mem(qproc
, pdev
);
978 ret
= q6v5_alloc_memory_region(qproc
);
982 ret
= q6v5_init_clocks(&pdev
->dev
, qproc
->proxy_clks
,
983 desc
->proxy_clk_names
);
985 dev_err(&pdev
->dev
, "Failed to get proxy clocks.\n");
988 qproc
->proxy_clk_count
= ret
;
990 ret
= q6v5_init_clocks(&pdev
->dev
, qproc
->active_clks
,
991 desc
->active_clk_names
);
993 dev_err(&pdev
->dev
, "Failed to get active clocks.\n");
996 qproc
->active_clk_count
= ret
;
998 ret
= q6v5_regulator_init(&pdev
->dev
, qproc
->proxy_regs
,
1001 dev_err(&pdev
->dev
, "Failed to get proxy regulators.\n");
1004 qproc
->proxy_reg_count
= ret
;
1006 ret
= q6v5_regulator_init(&pdev
->dev
, qproc
->active_regs
,
1007 desc
->active_supply
);
1009 dev_err(&pdev
->dev
, "Failed to get active regulators.\n");
1012 qproc
->active_reg_count
= ret
;
1014 ret
= q6v5_init_reset(qproc
);
1018 ret
= q6v5_request_irq(qproc
, pdev
, "wdog", q6v5_wdog_interrupt
);
1022 ret
= q6v5_request_irq(qproc
, pdev
, "fatal", q6v5_fatal_interrupt
);
1026 ret
= q6v5_request_irq(qproc
, pdev
, "handover", q6v5_handover_interrupt
);
1030 ret
= q6v5_request_irq(qproc
, pdev
, "stop-ack", q6v5_stop_ack_interrupt
);
1034 qproc
->state
= qcom_smem_state_get(&pdev
->dev
, "stop", &qproc
->stop_bit
);
1035 if (IS_ERR(qproc
->state
)) {
1036 ret
= PTR_ERR(qproc
->state
);
1040 qcom_add_smd_subdev(rproc
, &qproc
->smd_subdev
);
1042 ret
= rproc_add(rproc
);
1054 static int q6v5_remove(struct platform_device
*pdev
)
1056 struct q6v5
*qproc
= platform_get_drvdata(pdev
);
1058 rproc_del(qproc
->rproc
);
1060 qcom_remove_smd_subdev(qproc
->rproc
, &qproc
->smd_subdev
);
1061 rproc_free(qproc
->rproc
);
1066 static const struct rproc_hexagon_res msm8916_mss
= {
1067 .hexagon_mba_image
= "mba.mbn",
1068 .proxy_supply
= (struct qcom_mss_reg_res
[]) {
1083 .proxy_clk_names
= (char*[]){
1087 .active_clk_names
= (char*[]){
1095 static const struct rproc_hexagon_res msm8974_mss
= {
1096 .hexagon_mba_image
= "mba.b00",
1097 .proxy_supply
= (struct qcom_mss_reg_res
[]) {
1112 .active_supply
= (struct qcom_mss_reg_res
[]) {
1120 .proxy_clk_names
= (char*[]){
1124 .active_clk_names
= (char*[]){
1132 static const struct of_device_id q6v5_of_match
[] = {
1133 { .compatible
= "qcom,q6v5-pil", .data
= &msm8916_mss
},
1134 { .compatible
= "qcom,msm8916-mss-pil", .data
= &msm8916_mss
},
1135 { .compatible
= "qcom,msm8974-mss-pil", .data
= &msm8974_mss
},
1138 MODULE_DEVICE_TABLE(of
, q6v5_of_match
);
1140 static struct platform_driver q6v5_driver
= {
1141 .probe
= q6v5_probe
,
1142 .remove
= q6v5_remove
,
1144 .name
= "qcom-q6v5-pil",
1145 .of_match_table
= q6v5_of_match
,
1148 module_platform_driver(q6v5_driver
);
1150 MODULE_DESCRIPTION("Peripheral Image Loader for Hexagon");
1151 MODULE_LICENSE("GPL v2");