hw/intc/sh_intc: Use existing macro instead of local one
[qemu/ar7.git] / hw / intc / sh_intc.c
bloba98953d665f65e4e13629abc72586bef264d67ff
1 /*
2 * SuperH interrupt controller module
4 * Copyright (c) 2007 Magnus Damm
5 * Based on sh_timer.c and arm_timer.c by Paul Brook
6 * Copyright (c) 2005-2006 CodeSourcery.
8 * This code is licensed under the GPL.
9 */
11 #include "qemu/osdep.h"
12 #include "qemu/log.h"
13 #include "cpu.h"
14 #include "hw/sh4/sh_intc.h"
15 #include "hw/irq.h"
16 #include "hw/sh4/sh.h"
17 #include "trace.h"
19 void sh_intc_toggle_source(struct intc_source *source,
20 int enable_adj, int assert_adj)
22 int enable_changed = 0;
23 int pending_changed = 0;
24 int old_pending;
26 if ((source->enable_count == source->enable_max) && (enable_adj == -1)) {
27 enable_changed = -1;
29 source->enable_count += enable_adj;
31 if (source->enable_count == source->enable_max) {
32 enable_changed = 1;
34 source->asserted += assert_adj;
36 old_pending = source->pending;
37 source->pending = source->asserted &&
38 (source->enable_count == source->enable_max);
40 if (old_pending != source->pending) {
41 pending_changed = 1;
43 if (pending_changed) {
44 if (source->pending) {
45 source->parent->pending++;
46 if (source->parent->pending == 1) {
47 cpu_interrupt(first_cpu, CPU_INTERRUPT_HARD);
49 } else {
50 source->parent->pending--;
51 if (source->parent->pending == 0) {
52 cpu_reset_interrupt(first_cpu, CPU_INTERRUPT_HARD);
57 if (enable_changed || assert_adj || pending_changed) {
58 trace_sh_intc_sources(source->parent->pending, source->asserted,
59 source->enable_count, source->enable_max,
60 source->vect, source->asserted ? "asserted " :
61 assert_adj ? "deasserted" : "",
62 enable_changed == 1 ? "enabled " :
63 enable_changed == -1 ? "disabled " : "",
64 source->pending ? "pending" : "");
68 static void sh_intc_set_irq(void *opaque, int n, int level)
70 struct intc_desc *desc = opaque;
71 struct intc_source *source = &(desc->sources[n]);
73 if (level && !source->asserted) {
74 sh_intc_toggle_source(source, 0, 1);
75 } else if (!level && source->asserted) {
76 sh_intc_toggle_source(source, 0, -1);
80 int sh_intc_get_pending_vector(struct intc_desc *desc, int imask)
82 unsigned int i;
84 /* slow: use a linked lists of pending sources instead */
85 /* wrong: take interrupt priority into account (one list per priority) */
87 if (imask == 0x0f) {
88 return -1; /* FIXME, update code to include priority per source */
91 for (i = 0; i < desc->nr_sources; i++) {
92 struct intc_source *source = desc->sources + i;
94 if (source->pending) {
95 trace_sh_intc_pending(desc->pending, source->vect);
96 return source->vect;
100 abort();
/* Access modes returned by sh_intc_mode() for a guest register access. */
#define INTC_MODE_NONE       0 /* address matches neither set nor clear reg */
#define INTC_MODE_DUAL_SET   1 /* set/clear pair: this is the set register */
#define INTC_MODE_DUAL_CLR   2 /* set/clear pair: this is the clear register */
#define INTC_MODE_ENABLE_REG 3 /* single register, bits enable sources */
#define INTC_MODE_MASK_REG   4 /* single register, bits mask sources */
#define INTC_MODE_IS_PRIO    8 /* flag ORed in: register holds priorities */
110 static unsigned int sh_intc_mode(unsigned long address,
111 unsigned long set_reg, unsigned long clr_reg)
113 if ((address != A7ADDR(set_reg)) &&
114 (address != A7ADDR(clr_reg)))
115 return INTC_MODE_NONE;
117 if (set_reg && clr_reg) {
118 if (address == A7ADDR(set_reg)) {
119 return INTC_MODE_DUAL_SET;
120 } else {
121 return INTC_MODE_DUAL_CLR;
125 if (set_reg) {
126 return INTC_MODE_ENABLE_REG;
127 } else {
128 return INTC_MODE_MASK_REG;
132 static void sh_intc_locate(struct intc_desc *desc,
133 unsigned long address,
134 unsigned long **datap,
135 intc_enum **enums,
136 unsigned int *first,
137 unsigned int *width,
138 unsigned int *modep)
140 unsigned int i, mode;
142 /* this is slow but works for now */
144 if (desc->mask_regs) {
145 for (i = 0; i < desc->nr_mask_regs; i++) {
146 struct intc_mask_reg *mr = desc->mask_regs + i;
148 mode = sh_intc_mode(address, mr->set_reg, mr->clr_reg);
149 if (mode == INTC_MODE_NONE) {
150 continue;
152 *modep = mode;
153 *datap = &mr->value;
154 *enums = mr->enum_ids;
155 *first = mr->reg_width - 1;
156 *width = 1;
157 return;
161 if (desc->prio_regs) {
162 for (i = 0; i < desc->nr_prio_regs; i++) {
163 struct intc_prio_reg *pr = desc->prio_regs + i;
165 mode = sh_intc_mode(address, pr->set_reg, pr->clr_reg);
166 if (mode == INTC_MODE_NONE) {
167 continue;
169 *modep = mode | INTC_MODE_IS_PRIO;
170 *datap = &pr->value;
171 *enums = pr->enum_ids;
172 *first = (pr->reg_width / pr->field_width) - 1;
173 *width = pr->field_width;
174 return;
178 abort();
181 static void sh_intc_toggle_mask(struct intc_desc *desc, intc_enum id,
182 int enable, int is_group)
184 struct intc_source *source = desc->sources + id;
186 if (!id) {
187 return;
189 if (!source->next_enum_id && (!source->enable_max || !source->vect)) {
190 qemu_log_mask(LOG_UNIMP,
191 "sh_intc: reserved interrupt source %d modified\n", id);
192 return;
195 if (source->vect) {
196 sh_intc_toggle_source(source, enable ? 1 : -1, 0);
199 if ((is_group || !source->vect) && source->next_enum_id) {
200 sh_intc_toggle_mask(desc, source->next_enum_id, enable, 1);
203 if (!source->vect) {
204 trace_sh_intc_set(id, !!enable);
208 static uint64_t sh_intc_read(void *opaque, hwaddr offset,
209 unsigned size)
211 struct intc_desc *desc = opaque;
212 intc_enum *enum_ids = NULL;
213 unsigned int first = 0;
214 unsigned int width = 0;
215 unsigned int mode = 0;
216 unsigned long *valuep;
218 sh_intc_locate(desc, (unsigned long)offset, &valuep,
219 &enum_ids, &first, &width, &mode);
220 trace_sh_intc_read(size, (uint64_t)offset, *valuep);
221 return *valuep;
224 static void sh_intc_write(void *opaque, hwaddr offset,
225 uint64_t value, unsigned size)
227 struct intc_desc *desc = opaque;
228 intc_enum *enum_ids = NULL;
229 unsigned int first = 0;
230 unsigned int width = 0;
231 unsigned int mode = 0;
232 unsigned int k;
233 unsigned long *valuep;
234 unsigned long mask;
236 trace_sh_intc_write(size, (uint64_t)offset, value);
237 sh_intc_locate(desc, (unsigned long)offset, &valuep,
238 &enum_ids, &first, &width, &mode);
239 switch (mode) {
240 case INTC_MODE_ENABLE_REG | INTC_MODE_IS_PRIO:
241 break;
242 case INTC_MODE_DUAL_SET:
243 value |= *valuep;
244 break;
245 case INTC_MODE_DUAL_CLR:
246 value = *valuep & ~value;
247 break;
248 default:
249 abort();
252 for (k = 0; k <= first; k++) {
253 mask = ((1 << width) - 1) << ((first - k) * width);
255 if ((*valuep & mask) == (value & mask)) {
256 continue;
258 sh_intc_toggle_mask(desc, enum_ids[k], value & mask, 0);
261 *valuep = value;
/* MMIO callbacks for the interrupt controller register window. */
static const MemoryRegionOps sh_intc_ops = {
    .read = sh_intc_read,
    .write = sh_intc_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
270 struct intc_source *sh_intc_source(struct intc_desc *desc, intc_enum id)
272 if (id) {
273 return desc->sources + id;
275 return NULL;
278 static unsigned int sh_intc_register(MemoryRegion *sysmem,
279 struct intc_desc *desc,
280 const unsigned long address,
281 const char *type,
282 const char *action,
283 const unsigned int index)
285 char name[60];
286 MemoryRegion *iomem, *iomem_p4, *iomem_a7;
288 if (!address) {
289 return 0;
292 iomem = &desc->iomem;
293 iomem_p4 = desc->iomem_aliases + index;
294 iomem_a7 = iomem_p4 + 1;
296 #define SH_INTC_IOMEM_FORMAT "interrupt-controller-%s-%s-%s"
297 snprintf(name, sizeof(name), SH_INTC_IOMEM_FORMAT, type, action, "p4");
298 memory_region_init_alias(iomem_p4, NULL, name, iomem, A7ADDR(address), 4);
299 memory_region_add_subregion(sysmem, P4ADDR(address), iomem_p4);
301 snprintf(name, sizeof(name), SH_INTC_IOMEM_FORMAT, type, action, "a7");
302 memory_region_init_alias(iomem_a7, NULL, name, iomem, A7ADDR(address), 4);
303 memory_region_add_subregion(sysmem, A7ADDR(address), iomem_a7);
304 #undef SH_INTC_IOMEM_FORMAT
306 /* used to increment aliases index */
307 return 2;
310 static void sh_intc_register_source(struct intc_desc *desc,
311 intc_enum source,
312 struct intc_group *groups,
313 int nr_groups)
315 unsigned int i, k;
316 struct intc_source *s;
318 if (desc->mask_regs) {
319 for (i = 0; i < desc->nr_mask_regs; i++) {
320 struct intc_mask_reg *mr = desc->mask_regs + i;
322 for (k = 0; k < ARRAY_SIZE(mr->enum_ids); k++) {
323 if (mr->enum_ids[k] != source) {
324 continue;
326 s = sh_intc_source(desc, mr->enum_ids[k]);
327 if (s) {
328 s->enable_max++;
334 if (desc->prio_regs) {
335 for (i = 0; i < desc->nr_prio_regs; i++) {
336 struct intc_prio_reg *pr = desc->prio_regs + i;
338 for (k = 0; k < ARRAY_SIZE(pr->enum_ids); k++) {
339 if (pr->enum_ids[k] != source) {
340 continue;
342 s = sh_intc_source(desc, pr->enum_ids[k]);
343 if (s) {
344 s->enable_max++;
350 if (groups) {
351 for (i = 0; i < nr_groups; i++) {
352 struct intc_group *gr = groups + i;
354 for (k = 0; k < ARRAY_SIZE(gr->enum_ids); k++) {
355 if (gr->enum_ids[k] != source) {
356 continue;
358 s = sh_intc_source(desc, gr->enum_ids[k]);
359 if (s) {
360 s->enable_max++;
368 void sh_intc_register_sources(struct intc_desc *desc,
369 struct intc_vect *vectors,
370 int nr_vectors,
371 struct intc_group *groups,
372 int nr_groups)
374 unsigned int i, k;
375 struct intc_source *s;
377 for (i = 0; i < nr_vectors; i++) {
378 struct intc_vect *vect = vectors + i;
380 sh_intc_register_source(desc, vect->enum_id, groups, nr_groups);
381 s = sh_intc_source(desc, vect->enum_id);
382 if (s) {
383 s->vect = vect->vect;
384 trace_sh_intc_register("source", vect->enum_id, s->vect,
385 s->enable_count, s->enable_max);
389 if (groups) {
390 for (i = 0; i < nr_groups; i++) {
391 struct intc_group *gr = groups + i;
393 s = sh_intc_source(desc, gr->enum_id);
394 s->next_enum_id = gr->enum_ids[0];
396 for (k = 1; k < ARRAY_SIZE(gr->enum_ids); k++) {
397 if (!gr->enum_ids[k]) {
398 continue;
400 s = sh_intc_source(desc, gr->enum_ids[k - 1]);
401 s->next_enum_id = gr->enum_ids[k];
403 trace_sh_intc_register("group", gr->enum_id, 0xffff,
404 s->enable_count, s->enable_max);
409 int sh_intc_init(MemoryRegion *sysmem,
410 struct intc_desc *desc,
411 int nr_sources,
412 struct intc_mask_reg *mask_regs,
413 int nr_mask_regs,
414 struct intc_prio_reg *prio_regs,
415 int nr_prio_regs)
417 unsigned int i, j;
419 desc->pending = 0;
420 desc->nr_sources = nr_sources;
421 desc->mask_regs = mask_regs;
422 desc->nr_mask_regs = nr_mask_regs;
423 desc->prio_regs = prio_regs;
424 desc->nr_prio_regs = nr_prio_regs;
425 /* Allocate 4 MemoryRegions per register (2 actions * 2 aliases) */
426 desc->iomem_aliases = g_new0(MemoryRegion,
427 (nr_mask_regs + nr_prio_regs) * 4);
429 j = 0;
430 i = sizeof(struct intc_source) * nr_sources;
431 desc->sources = g_malloc0(i);
433 for (i = 0; i < desc->nr_sources; i++) {
434 struct intc_source *source = desc->sources + i;
436 source->parent = desc;
439 desc->irqs = qemu_allocate_irqs(sh_intc_set_irq, desc, nr_sources);
441 memory_region_init_io(&desc->iomem, NULL, &sh_intc_ops, desc,
442 "interrupt-controller", 0x100000000ULL);
444 #define INT_REG_PARAMS(reg_struct, type, action, j) \
445 reg_struct->action##_reg, #type, #action, j
446 if (desc->mask_regs) {
447 for (i = 0; i < desc->nr_mask_regs; i++) {
448 struct intc_mask_reg *mr = desc->mask_regs + i;
450 j += sh_intc_register(sysmem, desc,
451 INT_REG_PARAMS(mr, mask, set, j));
452 j += sh_intc_register(sysmem, desc,
453 INT_REG_PARAMS(mr, mask, clr, j));
457 if (desc->prio_regs) {
458 for (i = 0; i < desc->nr_prio_regs; i++) {
459 struct intc_prio_reg *pr = desc->prio_regs + i;
461 j += sh_intc_register(sysmem, desc,
462 INT_REG_PARAMS(pr, prio, set, j));
463 j += sh_intc_register(sysmem, desc,
464 INT_REG_PARAMS(pr, prio, clr, j));
467 #undef INT_REG_PARAMS
469 return 0;
473 * Assert level <n> IRL interrupt.
474 * 0:deassert. 1:lowest priority,... 15:highest priority
476 void sh_intc_set_irl(void *opaque, int n, int level)
478 struct intc_source *s = opaque;
479 int i, irl = level ^ 15;
480 for (i = 0; (s = sh_intc_source(s->parent, s->next_enum_id)); i++) {
481 if (i == irl) {
482 sh_intc_toggle_source(s, s->enable_count ? 0 : 1,
483 s->asserted ? 0 : 1);
484 } else if (s->asserted) {
485 sh_intc_toggle_source(s, 0, -1);