/*
 * SuperH interrupt controller module
 *
 * Copyright (c) 2007 Magnus Damm
 * Based on sh_timer.c and arm_timer.c by Paul Brook
 * Copyright (c) 2005-2006 CodeSourcery.
 *
 * This code is licensed under the GPL.
 */
11 #include "qemu/osdep.h"
14 #include "hw/sh4/sh_intc.h"
16 #include "hw/sh4/sh.h"
19 void sh_intc_toggle_source(struct intc_source
*source
,
20 int enable_adj
, int assert_adj
)
22 int enable_changed
= 0;
23 int pending_changed
= 0;
26 if (source
->enable_count
== source
->enable_max
&& enable_adj
== -1) {
29 source
->enable_count
+= enable_adj
;
31 if (source
->enable_count
== source
->enable_max
) {
34 source
->asserted
+= assert_adj
;
36 old_pending
= source
->pending
;
37 source
->pending
= source
->asserted
&&
38 (source
->enable_count
== source
->enable_max
);
40 if (old_pending
!= source
->pending
) {
43 if (pending_changed
) {
44 if (source
->pending
) {
45 source
->parent
->pending
++;
46 if (source
->parent
->pending
== 1) {
47 cpu_interrupt(first_cpu
, CPU_INTERRUPT_HARD
);
50 source
->parent
->pending
--;
51 if (source
->parent
->pending
== 0) {
52 cpu_reset_interrupt(first_cpu
, CPU_INTERRUPT_HARD
);
57 if (enable_changed
|| assert_adj
|| pending_changed
) {
58 trace_sh_intc_sources(source
->parent
->pending
, source
->asserted
,
59 source
->enable_count
, source
->enable_max
,
60 source
->vect
, source
->asserted
? "asserted " :
61 assert_adj
? "deasserted" : "",
62 enable_changed
== 1 ? "enabled " :
63 enable_changed
== -1 ? "disabled " : "",
64 source
->pending
? "pending" : "");
68 static void sh_intc_set_irq(void *opaque
, int n
, int level
)
70 struct intc_desc
*desc
= opaque
;
71 struct intc_source
*source
= &desc
->sources
[n
];
73 if (level
&& !source
->asserted
) {
74 sh_intc_toggle_source(source
, 0, 1);
75 } else if (!level
&& source
->asserted
) {
76 sh_intc_toggle_source(source
, 0, -1);
80 int sh_intc_get_pending_vector(struct intc_desc
*desc
, int imask
)
84 /* slow: use a linked lists of pending sources instead */
85 /* wrong: take interrupt priority into account (one list per priority) */
88 return -1; /* FIXME, update code to include priority per source */
91 for (i
= 0; i
< desc
->nr_sources
; i
++) {
92 struct intc_source
*source
= &desc
->sources
[i
];
94 if (source
->pending
) {
95 trace_sh_intc_pending(desc
->pending
, source
->vect
);
99 g_assert_not_reached();
106 INTC_MODE_ENABLE_REG
,
109 #define INTC_MODE_IS_PRIO 0x80
111 static SHIntCMode
sh_intc_mode(unsigned long address
, unsigned long set_reg
,
112 unsigned long clr_reg
)
114 if (address
!= A7ADDR(set_reg
) && address
!= A7ADDR(clr_reg
)) {
115 return INTC_MODE_NONE
;
117 if (set_reg
&& clr_reg
) {
118 return address
== A7ADDR(set_reg
) ?
119 INTC_MODE_DUAL_SET
: INTC_MODE_DUAL_CLR
;
121 return set_reg
? INTC_MODE_ENABLE_REG
: INTC_MODE_MASK_REG
;
124 static void sh_intc_locate(struct intc_desc
*desc
,
125 unsigned long address
,
126 unsigned long **datap
,
135 /* this is slow but works for now */
137 if (desc
->mask_regs
) {
138 for (i
= 0; i
< desc
->nr_mask_regs
; i
++) {
139 struct intc_mask_reg
*mr
= &desc
->mask_regs
[i
];
141 mode
= sh_intc_mode(address
, mr
->set_reg
, mr
->clr_reg
);
142 if (mode
!= INTC_MODE_NONE
) {
145 *enums
= mr
->enum_ids
;
146 *first
= mr
->reg_width
- 1;
153 if (desc
->prio_regs
) {
154 for (i
= 0; i
< desc
->nr_prio_regs
; i
++) {
155 struct intc_prio_reg
*pr
= &desc
->prio_regs
[i
];
157 mode
= sh_intc_mode(address
, pr
->set_reg
, pr
->clr_reg
);
158 if (mode
!= INTC_MODE_NONE
) {
159 *modep
= mode
| INTC_MODE_IS_PRIO
;
161 *enums
= pr
->enum_ids
;
162 *first
= pr
->reg_width
/ pr
->field_width
- 1;
163 *width
= pr
->field_width
;
168 g_assert_not_reached();
171 static void sh_intc_toggle_mask(struct intc_desc
*desc
, intc_enum id
,
172 int enable
, int is_group
)
174 struct intc_source
*source
= &desc
->sources
[id
];
179 if (!source
->next_enum_id
&& (!source
->enable_max
|| !source
->vect
)) {
180 qemu_log_mask(LOG_UNIMP
,
181 "sh_intc: reserved interrupt source %d modified\n", id
);
186 sh_intc_toggle_source(source
, enable
? 1 : -1, 0);
189 if ((is_group
|| !source
->vect
) && source
->next_enum_id
) {
190 sh_intc_toggle_mask(desc
, source
->next_enum_id
, enable
, 1);
194 trace_sh_intc_set(id
, !!enable
);
198 static uint64_t sh_intc_read(void *opaque
, hwaddr offset
, unsigned size
)
200 struct intc_desc
*desc
= opaque
;
205 unsigned long *valuep
;
207 sh_intc_locate(desc
, (unsigned long)offset
, &valuep
,
208 &enum_ids
, &first
, &width
, &mode
);
209 trace_sh_intc_read(size
, (uint64_t)offset
, *valuep
);
213 static void sh_intc_write(void *opaque
, hwaddr offset
,
214 uint64_t value
, unsigned size
)
216 struct intc_desc
*desc
= opaque
;
221 unsigned long *valuep
;
225 trace_sh_intc_write(size
, (uint64_t)offset
, value
);
226 sh_intc_locate(desc
, (unsigned long)offset
, &valuep
,
227 &enum_ids
, &first
, &width
, &mode
);
229 case INTC_MODE_ENABLE_REG
| INTC_MODE_IS_PRIO
:
231 case INTC_MODE_DUAL_SET
:
234 case INTC_MODE_DUAL_CLR
:
235 value
= *valuep
& ~value
;
238 g_assert_not_reached();
241 for (k
= 0; k
<= first
; k
++) {
242 mask
= (1 << width
) - 1;
243 mask
<<= (first
- k
) * width
;
245 if ((*valuep
& mask
) != (value
& mask
)) {
246 sh_intc_toggle_mask(desc
, enum_ids
[k
], value
& mask
, 0);
253 static const MemoryRegionOps sh_intc_ops
= {
254 .read
= sh_intc_read
,
255 .write
= sh_intc_write
,
256 .endianness
= DEVICE_NATIVE_ENDIAN
,
259 static void sh_intc_register_source(struct intc_desc
*desc
,
261 struct intc_group
*groups
,
267 if (desc
->mask_regs
) {
268 for (i
= 0; i
< desc
->nr_mask_regs
; i
++) {
269 struct intc_mask_reg
*mr
= &desc
->mask_regs
[i
];
271 for (k
= 0; k
< ARRAY_SIZE(mr
->enum_ids
); k
++) {
272 id
= mr
->enum_ids
[k
];
273 if (id
&& id
== source
) {
274 desc
->sources
[id
].enable_max
++;
280 if (desc
->prio_regs
) {
281 for (i
= 0; i
< desc
->nr_prio_regs
; i
++) {
282 struct intc_prio_reg
*pr
= &desc
->prio_regs
[i
];
284 for (k
= 0; k
< ARRAY_SIZE(pr
->enum_ids
); k
++) {
285 id
= pr
->enum_ids
[k
];
286 if (id
&& id
== source
) {
287 desc
->sources
[id
].enable_max
++;
294 for (i
= 0; i
< nr_groups
; i
++) {
295 struct intc_group
*gr
= &groups
[i
];
297 for (k
= 0; k
< ARRAY_SIZE(gr
->enum_ids
); k
++) {
298 id
= gr
->enum_ids
[k
];
299 if (id
&& id
== source
) {
300 desc
->sources
[id
].enable_max
++;
308 void sh_intc_register_sources(struct intc_desc
*desc
,
309 struct intc_vect
*vectors
,
311 struct intc_group
*groups
,
316 struct intc_source
*s
;
318 for (i
= 0; i
< nr_vectors
; i
++) {
319 struct intc_vect
*vect
= &vectors
[i
];
321 sh_intc_register_source(desc
, vect
->enum_id
, groups
, nr_groups
);
324 s
= &desc
->sources
[id
];
325 s
->vect
= vect
->vect
;
326 trace_sh_intc_register("source", vect
->enum_id
, s
->vect
,
327 s
->enable_count
, s
->enable_max
);
332 for (i
= 0; i
< nr_groups
; i
++) {
333 struct intc_group
*gr
= &groups
[i
];
336 s
= &desc
->sources
[id
];
337 s
->next_enum_id
= gr
->enum_ids
[0];
339 for (k
= 1; k
< ARRAY_SIZE(gr
->enum_ids
); k
++) {
340 if (gr
->enum_ids
[k
]) {
341 id
= gr
->enum_ids
[k
- 1];
342 s
= &desc
->sources
[id
];
343 s
->next_enum_id
= gr
->enum_ids
[k
];
346 trace_sh_intc_register("group", gr
->enum_id
, 0xffff,
347 s
->enable_count
, s
->enable_max
);
352 static unsigned int sh_intc_register(MemoryRegion
*sysmem
,
353 struct intc_desc
*desc
,
354 const unsigned long address
,
357 const unsigned int index
)
360 MemoryRegion
*iomem
, *iomem_p4
, *iomem_a7
;
366 iomem
= &desc
->iomem
;
367 iomem_p4
= &desc
->iomem_aliases
[index
];
368 iomem_a7
= iomem_p4
+ 1;
370 snprintf(name
, sizeof(name
), "intc-%s-%s-%s", type
, action
, "p4");
371 memory_region_init_alias(iomem_p4
, NULL
, name
, iomem
, A7ADDR(address
), 4);
372 memory_region_add_subregion(sysmem
, P4ADDR(address
), iomem_p4
);
374 snprintf(name
, sizeof(name
), "intc-%s-%s-%s", type
, action
, "a7");
375 memory_region_init_alias(iomem_a7
, NULL
, name
, iomem
, A7ADDR(address
), 4);
376 memory_region_add_subregion(sysmem
, A7ADDR(address
), iomem_a7
);
378 /* used to increment aliases index */
382 int sh_intc_init(MemoryRegion
*sysmem
,
383 struct intc_desc
*desc
,
385 struct intc_mask_reg
*mask_regs
,
387 struct intc_prio_reg
*prio_regs
,
393 desc
->nr_sources
= nr_sources
;
394 desc
->mask_regs
= mask_regs
;
395 desc
->nr_mask_regs
= nr_mask_regs
;
396 desc
->prio_regs
= prio_regs
;
397 desc
->nr_prio_regs
= nr_prio_regs
;
398 /* Allocate 4 MemoryRegions per register (2 actions * 2 aliases) */
399 desc
->iomem_aliases
= g_new0(MemoryRegion
,
400 (nr_mask_regs
+ nr_prio_regs
) * 4);
401 desc
->sources
= g_new0(struct intc_source
, nr_sources
);
402 for (i
= 0; i
< nr_sources
; i
++) {
403 desc
->sources
[i
].parent
= desc
;
405 desc
->irqs
= qemu_allocate_irqs(sh_intc_set_irq
, desc
, nr_sources
);
406 memory_region_init_io(&desc
->iomem
, NULL
, &sh_intc_ops
, desc
, "intc",
409 if (desc
->mask_regs
) {
410 for (i
= 0; i
< desc
->nr_mask_regs
; i
++) {
411 struct intc_mask_reg
*mr
= &desc
->mask_regs
[i
];
413 j
+= sh_intc_register(sysmem
, desc
, mr
->set_reg
, "mask", "set", j
);
414 j
+= sh_intc_register(sysmem
, desc
, mr
->clr_reg
, "mask", "clr", j
);
418 if (desc
->prio_regs
) {
419 for (i
= 0; i
< desc
->nr_prio_regs
; i
++) {
420 struct intc_prio_reg
*pr
= &desc
->prio_regs
[i
];
422 j
+= sh_intc_register(sysmem
, desc
, pr
->set_reg
, "prio", "set", j
);
423 j
+= sh_intc_register(sysmem
, desc
, pr
->clr_reg
, "prio", "clr", j
);
431 * Assert level <n> IRL interrupt.
432 * 0:deassert. 1:lowest priority,... 15:highest priority
434 void sh_intc_set_irl(void *opaque
, int n
, int level
)
436 struct intc_source
*s
= opaque
;
437 int i
, irl
= level
^ 15;
438 intc_enum id
= s
->next_enum_id
;
440 for (i
= 0; id
; id
= s
->next_enum_id
, i
++) {
441 s
= &s
->parent
->sources
[id
];
443 sh_intc_toggle_source(s
, s
->enable_count
? 0 : 1,
444 s
->asserted
? 0 : 1);
445 } else if (s
->asserted
) {
446 sh_intc_toggle_source(s
, 0, -1);