/*
 * Allwinner A20/A31 SoCs NMI IRQ chip driver.
 *
 * Carlo Caione <carlo.caione@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#define DRV_NAME	"sunxi-nmi"
#define pr_fmt(fmt)	DRV_NAME ": " fmt

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>

#define SUNXI_NMI_SRC_TYPE_MASK	0x00000003

#define SUNXI_NMI_IRQ_BIT	BIT(0)

#define SUN6I_R_INTC_CTRL	0x0c
#define SUN6I_R_INTC_PENDING	0x10
#define SUN6I_R_INTC_ENABLE	0x40

/*
 * For deprecated sun6i-a31-sc-nmi compatible.
 * Registers are offset by 0x0c.
 */
#define SUN6I_R_INTC_NMI_OFFSET	0x0c
#define SUN6I_NMI_CTRL		(SUN6I_R_INTC_CTRL - SUN6I_R_INTC_NMI_OFFSET)
#define SUN6I_NMI_PENDING	(SUN6I_R_INTC_PENDING - SUN6I_R_INTC_NMI_OFFSET)
#define SUN6I_NMI_ENABLE	(SUN6I_R_INTC_ENABLE - SUN6I_R_INTC_NMI_OFFSET)

#define SUN7I_NMI_CTRL		0x00
#define SUN7I_NMI_PENDING	0x04
#define SUN7I_NMI_ENABLE	0x08

#define SUN9I_NMI_CTRL		0x00
#define SUN9I_NMI_ENABLE	0x04
#define SUN9I_NMI_PENDING	0x08

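/* Values for the SRC_TYPE field in the control register. */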
enum {
        SUNXI_SRC_TYPE_LEVEL_LOW = 0,
        SUNXI_SRC_TYPE_EDGE_FALLING,
        SUNXI_SRC_TYPE_LEVEL_HIGH,
        SUNXI_SRC_TYPE_EDGE_RISING,
};

struct sunxi_sc_nmi_reg_offs {
        u32 ctrl;
        u32 pend;
        u32 enable;
};

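/* Per-variant offsets of the control, pending and enable registers. */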
static const struct sunxi_sc_nmi_reg_offs sun6i_r_intc_reg_offs __initconst = {
        .ctrl   = SUN6I_R_INTC_CTRL,
        .pend   = SUN6I_R_INTC_PENDING,
        .enable = SUN6I_R_INTC_ENABLE,
};

static const struct sunxi_sc_nmi_reg_offs sun6i_reg_offs __initconst = {
        .ctrl   = SUN6I_NMI_CTRL,
        .pend   = SUN6I_NMI_PENDING,
        .enable = SUN6I_NMI_ENABLE,
};

static const struct sunxi_sc_nmi_reg_offs sun7i_reg_offs __initconst = {
        .ctrl   = SUN7I_NMI_CTRL,
        .pend   = SUN7I_NMI_PENDING,
        .enable = SUN7I_NMI_ENABLE,
};

static const struct sunxi_sc_nmi_reg_offs sun9i_reg_offs __initconst = {
        .ctrl   = SUN9I_NMI_CTRL,
        .pend   = SUN9I_NMI_PENDING,
        .enable = SUN9I_NMI_ENABLE,
};

static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
                                      u32 val)
{
        irq_reg_writel(gc, val, off);
}

static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
{
        return irq_reg_readl(gc, off);
}

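/*
 * Chained handler for the parent NMI line: map hwirq 0 of this domain to
 * its Linux IRQ and handle it between chained_irq_enter()/exit().
 */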
static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc)
{
        struct irq_domain *domain = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned int virq = irq_find_mapping(domain, 0);

        chained_irq_enter(chip, desc);
        generic_handle_irq(virq);
        chained_irq_exit(chip, desc);
}

static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
        struct irq_chip_type *ct = gc->chip_types;
        u32 src_type_reg;
        u32 ctrl_off = ct->regs.type;
        unsigned int src_type;
        unsigned int i;

        irq_gc_lock(gc);

        switch (flow_type & IRQF_TRIGGER_MASK) {
        case IRQ_TYPE_EDGE_FALLING:
                src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
                break;
        case IRQ_TYPE_EDGE_RISING:
                src_type = SUNXI_SRC_TYPE_EDGE_RISING;
                break;
        case IRQ_TYPE_LEVEL_HIGH:
                src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
                break;
        case IRQ_TYPE_NONE:
        case IRQ_TYPE_LEVEL_LOW:
                src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
                break;
        default:
                irq_gc_unlock(gc);
                pr_err("Cannot assign multiple trigger modes to IRQ %d.\n",
                       data->irq);
                return -EBADR;
        }

        irqd_set_trigger_type(data, flow_type);
        irq_setup_alt_chip(data, flow_type);

        for (i = 0; i < gc->num_ct; i++, ct++)
                if (ct->type & flow_type)
                        ctrl_off = ct->regs.type;

        src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
        src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
        src_type_reg |= src_type;
        sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);

        irq_gc_unlock(gc);

        return IRQ_SET_MASK_OK;
}

static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
                                        const struct sunxi_sc_nmi_reg_offs *reg_offs)
{
        struct irq_domain *domain;
        struct irq_chip_generic *gc;
        unsigned int irq;
        unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
        int ret;

        domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
        if (!domain) {
                pr_err("Could not register interrupt domain.\n");
                return -ENOMEM;
        }

        ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME,
                                             handle_fasteoi_irq, clr, 0,
                                             IRQ_GC_INIT_MASK_CACHE);
        if (ret) {
                pr_err("Could not allocate generic interrupt chip.\n");
                goto fail_irqd_remove;
        }

        irq = irq_of_parse_and_map(node, 0);
        if (!irq) {
                pr_err("unable to parse irq\n");
                ret = -EINVAL;
                goto fail_irqd_remove;
        }

        gc = irq_get_domain_generic_chip(domain, 0);
        gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
        if (IS_ERR(gc->reg_base)) {
                pr_err("unable to map resource\n");
                ret = PTR_ERR(gc->reg_base);
                goto fail_irqd_remove;
        }

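        /* Level-triggered flavour: fasteoi flow, EOI writes the pending bit. */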
        gc->chip_types[0].type               = IRQ_TYPE_LEVEL_MASK;
        gc->chip_types[0].chip.irq_mask      = irq_gc_mask_clr_bit;
        gc->chip_types[0].chip.irq_unmask    = irq_gc_mask_set_bit;
        gc->chip_types[0].chip.irq_eoi       = irq_gc_ack_set_bit;
        gc->chip_types[0].chip.irq_set_type  = sunxi_sc_nmi_set_type;
        gc->chip_types[0].chip.flags         = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
        gc->chip_types[0].regs.ack           = reg_offs->pend;
        gc->chip_types[0].regs.mask          = reg_offs->enable;
        gc->chip_types[0].regs.type          = reg_offs->ctrl;

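        /* Edge-triggered flavour: ack via the pending register, edge flow handler. */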
        gc->chip_types[1].type               = IRQ_TYPE_EDGE_BOTH;
        gc->chip_types[1].chip.name          = gc->chip_types[0].chip.name;
        gc->chip_types[1].chip.irq_ack       = irq_gc_ack_set_bit;
        gc->chip_types[1].chip.irq_mask      = irq_gc_mask_clr_bit;
        gc->chip_types[1].chip.irq_unmask    = irq_gc_mask_set_bit;
        gc->chip_types[1].chip.irq_set_type  = sunxi_sc_nmi_set_type;
        gc->chip_types[1].regs.ack           = reg_offs->pend;
        gc->chip_types[1].regs.mask          = reg_offs->enable;
        gc->chip_types[1].regs.type          = reg_offs->ctrl;
        gc->chip_types[1].handler            = handle_edge_irq;

        /* Disable any active interrupts */
        sunxi_sc_nmi_write(gc, reg_offs->enable, 0);

        /* Clear any pending NMI interrupts */
        sunxi_sc_nmi_write(gc, reg_offs->pend, SUNXI_NMI_IRQ_BIT);

        irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);

        return 0;

fail_irqd_remove:
        irq_domain_remove(domain);

        return ret;
}

static int __init sun6i_r_intc_irq_init(struct device_node *node,
                                        struct device_node *parent)
{
        return sunxi_sc_nmi_irq_init(node, &sun6i_r_intc_reg_offs);
}
IRQCHIP_DECLARE(sun6i_r_intc, "allwinner,sun6i-a31-r-intc",
                sun6i_r_intc_irq_init);

static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
                                        struct device_node *parent)
{
        return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
}
IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);

static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
                                        struct device_node *parent)
{
        return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
}
IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);

static int __init sun9i_nmi_irq_init(struct device_node *node,
                                     struct device_node *parent)
{
        return sunxi_sc_nmi_irq_init(node, &sun9i_reg_offs);
}
IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init);