/*
 * RISC-V IMSIC (Incoming Message Signaled Interrupt Controller)
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
20 #include "qapi/error.h"
22 #include "qemu/module.h"
23 #include "qemu/error-report.h"
24 #include "qemu/bswap.h"
25 #include "exec/address-spaces.h"
26 #include "hw/sysbus.h"
27 #include "hw/pci/msi.h"
28 #include "hw/boards.h"
29 #include "hw/qdev-properties.h"
30 #include "hw/intc/riscv_imsic.h"
32 #include "target/riscv/cpu.h"
33 #include "target/riscv/cpu_bits.h"
34 #include "sysemu/sysemu.h"
35 #include "sysemu/kvm.h"
36 #include "migration/vmstate.h"
/* Byte offsets of the little/big-endian MSI doorbell within each MMIO page */
#define IMSIC_MMIO_PAGE_LE             0x00
#define IMSIC_MMIO_PAGE_BE             0x04

/* Valid interrupt-identity range mandated by the AIA specification */
#define IMSIC_MIN_ID                   ((IMSIC_EIPx_BITS * 2) - 1)
#define IMSIC_MAX_ID                   (IMSIC_TOPEI_IID_MASK)

/* Per-interrupt software state bits kept in imsic->eistate[] */
#define IMSIC_EISTATE_PENDING          (1U << 0)
#define IMSIC_EISTATE_ENABLED          (1U << 1)
#define IMSIC_EISTATE_ENPEND           (IMSIC_EISTATE_ENABLED | \
                                        IMSIC_EISTATE_PENDING)
49 static uint32_t riscv_imsic_topei(RISCVIMSICState
*imsic
, uint32_t page
)
51 uint32_t i
, max_irq
, base
;
53 base
= page
* imsic
->num_irqs
;
54 max_irq
= (imsic
->eithreshold
[page
] &&
55 (imsic
->eithreshold
[page
] <= imsic
->num_irqs
)) ?
56 imsic
->eithreshold
[page
] : imsic
->num_irqs
;
57 for (i
= 1; i
< max_irq
; i
++) {
58 if ((imsic
->eistate
[base
+ i
] & IMSIC_EISTATE_ENPEND
) ==
59 IMSIC_EISTATE_ENPEND
) {
60 return (i
<< IMSIC_TOPEI_IID_SHIFT
) | i
;
67 static void riscv_imsic_update(RISCVIMSICState
*imsic
, uint32_t page
)
69 if (imsic
->eidelivery
[page
] && riscv_imsic_topei(imsic
, page
)) {
70 qemu_irq_raise(imsic
->external_irqs
[page
]);
72 qemu_irq_lower(imsic
->external_irqs
[page
]);
76 static int riscv_imsic_eidelivery_rmw(RISCVIMSICState
*imsic
, uint32_t page
,
81 target_ulong old_val
= imsic
->eidelivery
[page
];
88 imsic
->eidelivery
[page
] = (old_val
& ~wr_mask
) | (new_val
& wr_mask
);
90 riscv_imsic_update(imsic
, page
);
94 static int riscv_imsic_eithreshold_rmw(RISCVIMSICState
*imsic
, uint32_t page
,
99 target_ulong old_val
= imsic
->eithreshold
[page
];
105 wr_mask
&= IMSIC_MAX_ID
;
106 imsic
->eithreshold
[page
] = (old_val
& ~wr_mask
) | (new_val
& wr_mask
);
108 riscv_imsic_update(imsic
, page
);
112 static int riscv_imsic_topei_rmw(RISCVIMSICState
*imsic
, uint32_t page
,
113 target_ulong
*val
, target_ulong new_val
,
114 target_ulong wr_mask
)
116 uint32_t base
, topei
= riscv_imsic_topei(imsic
, page
);
118 /* Read pending and enabled interrupt with highest priority */
123 /* Writes ignore value and clear top pending interrupt */
124 if (topei
&& wr_mask
) {
125 topei
>>= IMSIC_TOPEI_IID_SHIFT
;
126 base
= page
* imsic
->num_irqs
;
128 imsic
->eistate
[base
+ topei
] &= ~IMSIC_EISTATE_PENDING
;
131 riscv_imsic_update(imsic
, page
);
137 static int riscv_imsic_eix_rmw(RISCVIMSICState
*imsic
,
138 uint32_t xlen
, uint32_t page
,
139 uint32_t num
, bool pend
, target_ulong
*val
,
140 target_ulong new_val
, target_ulong wr_mask
)
144 uint32_t state
= (pend
) ? IMSIC_EISTATE_PENDING
: IMSIC_EISTATE_ENABLED
;
152 if (num
>= (imsic
->num_irqs
/ xlen
)) {
156 base
= (page
* imsic
->num_irqs
) + (num
* xlen
);
160 for (i
= 0; i
< xlen
; i
++) {
161 mask
= (target_ulong
)1 << i
;
162 *val
|= (imsic
->eistate
[base
+ i
] & state
) ? mask
: 0;
166 for (i
= 0; i
< xlen
; i
++) {
167 /* Bit0 of eip0 and eie0 are read-only zero */
172 mask
= (target_ulong
)1 << i
;
173 if (wr_mask
& mask
) {
174 if (new_val
& mask
) {
175 imsic
->eistate
[base
+ i
] |= state
;
177 imsic
->eistate
[base
+ i
] &= ~state
;
182 riscv_imsic_update(imsic
, page
);
186 static int riscv_imsic_rmw(void *arg
, target_ulong reg
, target_ulong
*val
,
187 target_ulong new_val
, target_ulong wr_mask
)
189 RISCVIMSICState
*imsic
= arg
;
190 uint32_t isel
, priv
, virt
, vgein
, xlen
, page
;
192 priv
= AIA_IREG_PRIV(reg
);
193 virt
= AIA_IREG_VIRT(reg
);
194 isel
= AIA_IREG_ISEL(reg
);
195 vgein
= AIA_IREG_VGEIN(reg
);
196 xlen
= AIA_IREG_XLEN(reg
);
199 if (priv
== PRV_M
&& !virt
) {
207 if (vgein
&& vgein
< imsic
->num_pages
) {
221 case ISELECT_IMSIC_EIDELIVERY
:
222 return riscv_imsic_eidelivery_rmw(imsic
, page
, val
,
224 case ISELECT_IMSIC_EITHRESHOLD
:
225 return riscv_imsic_eithreshold_rmw(imsic
, page
, val
,
227 case ISELECT_IMSIC_TOPEI
:
228 return riscv_imsic_topei_rmw(imsic
, page
, val
, new_val
, wr_mask
);
229 case ISELECT_IMSIC_EIP0
... ISELECT_IMSIC_EIP63
:
230 return riscv_imsic_eix_rmw(imsic
, xlen
, page
,
231 isel
- ISELECT_IMSIC_EIP0
,
232 true, val
, new_val
, wr_mask
);
233 case ISELECT_IMSIC_EIE0
... ISELECT_IMSIC_EIE63
:
234 return riscv_imsic_eix_rmw(imsic
, xlen
, page
,
235 isel
- ISELECT_IMSIC_EIE0
,
236 false, val
, new_val
, wr_mask
);
242 qemu_log_mask(LOG_GUEST_ERROR
,
243 "%s: Invalid register priv=%d virt=%d isel=%d vgein=%d\n",
244 __func__
, priv
, virt
, isel
, vgein
);
248 static uint64_t riscv_imsic_read(void *opaque
, hwaddr addr
, unsigned size
)
250 RISCVIMSICState
*imsic
= opaque
;
252 /* Reads must be 4 byte words */
253 if ((addr
& 0x3) != 0) {
257 /* Reads cannot be out of range */
258 if (addr
> IMSIC_MMIO_SIZE(imsic
->num_pages
)) {
265 qemu_log_mask(LOG_GUEST_ERROR
,
266 "%s: Invalid register read 0x%" HWADDR_PRIx
"\n",
271 static void riscv_imsic_write(void *opaque
, hwaddr addr
, uint64_t value
,
274 RISCVIMSICState
*imsic
= opaque
;
277 /* Writes must be 4 byte words */
278 if ((addr
& 0x3) != 0) {
282 /* Writes cannot be out of range */
283 if (addr
> IMSIC_MMIO_SIZE(imsic
->num_pages
)) {
287 #if defined(CONFIG_KVM)
288 if (kvm_irqchip_in_kernel()) {
291 msi
.address_lo
= extract64(imsic
->mmio
.addr
+ addr
, 0, 32);
292 msi
.address_hi
= extract64(imsic
->mmio
.addr
+ addr
, 32, 32);
293 msi
.data
= le32_to_cpu(value
);
295 kvm_vm_ioctl(kvm_state
, KVM_SIGNAL_MSI
, &msi
);
301 /* Writes only supported for MSI little-endian registers */
302 page
= addr
>> IMSIC_MMIO_PAGE_SHIFT
;
303 if ((addr
& (IMSIC_MMIO_PAGE_SZ
- 1)) == IMSIC_MMIO_PAGE_LE
) {
304 if (value
&& (value
< imsic
->num_irqs
)) {
305 imsic
->eistate
[(page
* imsic
->num_irqs
) + value
] |=
306 IMSIC_EISTATE_PENDING
;
310 /* Update CPU external interrupt status */
311 riscv_imsic_update(imsic
, page
);
316 qemu_log_mask(LOG_GUEST_ERROR
,
317 "%s: Invalid register write 0x%" HWADDR_PRIx
"\n",
321 static const MemoryRegionOps riscv_imsic_ops
= {
322 .read
= riscv_imsic_read
,
323 .write
= riscv_imsic_write
,
324 .endianness
= DEVICE_LITTLE_ENDIAN
,
326 .min_access_size
= 4,
331 static void riscv_imsic_realize(DeviceState
*dev
, Error
**errp
)
333 RISCVIMSICState
*imsic
= RISCV_IMSIC(dev
);
334 RISCVCPU
*rcpu
= RISCV_CPU(cpu_by_arch_id(imsic
->hartid
));
335 CPUState
*cpu
= cpu_by_arch_id(imsic
->hartid
);
336 CPURISCVState
*env
= cpu
? cpu_env(cpu
) : NULL
;
338 if (!kvm_irqchip_in_kernel()) {
339 imsic
->num_eistate
= imsic
->num_pages
* imsic
->num_irqs
;
340 imsic
->eidelivery
= g_new0(uint32_t, imsic
->num_pages
);
341 imsic
->eithreshold
= g_new0(uint32_t, imsic
->num_pages
);
342 imsic
->eistate
= g_new0(uint32_t, imsic
->num_eistate
);
345 memory_region_init_io(&imsic
->mmio
, OBJECT(dev
), &riscv_imsic_ops
,
346 imsic
, TYPE_RISCV_IMSIC
,
347 IMSIC_MMIO_SIZE(imsic
->num_pages
));
348 sysbus_init_mmio(SYS_BUS_DEVICE(dev
), &imsic
->mmio
);
350 /* Claim the CPU interrupt to be triggered by this IMSIC */
351 if (riscv_cpu_claim_interrupts(rcpu
,
352 (imsic
->mmode
) ? MIP_MEIP
: MIP_SEIP
) < 0) {
353 error_setg(errp
, "%s already claimed",
354 (imsic
->mmode
) ? "MEIP" : "SEIP");
358 /* Create output IRQ lines */
359 imsic
->external_irqs
= g_malloc(sizeof(qemu_irq
) * imsic
->num_pages
);
360 qdev_init_gpio_out(dev
, imsic
->external_irqs
, imsic
->num_pages
);
362 /* Force select AIA feature and setup CSR read-modify-write callback */
365 rcpu
->cfg
.ext_ssaia
= true;
366 riscv_cpu_set_geilen(env
, imsic
->num_pages
- 1);
368 rcpu
->cfg
.ext_smaia
= true;
370 riscv_cpu_set_aia_ireg_rmw_fn(env
, (imsic
->mmode
) ? PRV_M
: PRV_S
,
371 riscv_imsic_rmw
, imsic
);
374 msi_nonbroken
= true;
377 static Property riscv_imsic_properties
[] = {
378 DEFINE_PROP_BOOL("mmode", RISCVIMSICState
, mmode
, 0),
379 DEFINE_PROP_UINT32("hartid", RISCVIMSICState
, hartid
, 0),
380 DEFINE_PROP_UINT32("num-pages", RISCVIMSICState
, num_pages
, 0),
381 DEFINE_PROP_UINT32("num-irqs", RISCVIMSICState
, num_irqs
, 0),
382 DEFINE_PROP_END_OF_LIST(),
385 static const VMStateDescription vmstate_riscv_imsic
= {
386 .name
= "riscv_imsic",
388 .minimum_version_id
= 1,
389 .fields
= (const VMStateField
[]) {
390 VMSTATE_VARRAY_UINT32(eidelivery
, RISCVIMSICState
,
392 vmstate_info_uint32
, uint32_t),
393 VMSTATE_VARRAY_UINT32(eithreshold
, RISCVIMSICState
,
395 vmstate_info_uint32
, uint32_t),
396 VMSTATE_VARRAY_UINT32(eistate
, RISCVIMSICState
,
398 vmstate_info_uint32
, uint32_t),
399 VMSTATE_END_OF_LIST()
403 static void riscv_imsic_class_init(ObjectClass
*klass
, void *data
)
405 DeviceClass
*dc
= DEVICE_CLASS(klass
);
407 device_class_set_props(dc
, riscv_imsic_properties
);
408 dc
->realize
= riscv_imsic_realize
;
409 dc
->vmsd
= &vmstate_riscv_imsic
;
412 static const TypeInfo riscv_imsic_info
= {
413 .name
= TYPE_RISCV_IMSIC
,
414 .parent
= TYPE_SYS_BUS_DEVICE
,
415 .instance_size
= sizeof(RISCVIMSICState
),
416 .class_init
= riscv_imsic_class_init
,
419 static void riscv_imsic_register_types(void)
421 type_register_static(&riscv_imsic_info
);
424 type_init(riscv_imsic_register_types
)
427 * Create IMSIC device.
429 DeviceState
*riscv_imsic_create(hwaddr addr
, uint32_t hartid
, bool mmode
,
430 uint32_t num_pages
, uint32_t num_ids
)
432 DeviceState
*dev
= qdev_new(TYPE_RISCV_IMSIC
);
433 CPUState
*cpu
= cpu_by_arch_id(hartid
);
436 assert(!(addr
& (IMSIC_MMIO_PAGE_SZ
- 1)));
438 assert(num_pages
== 1);
440 assert(num_pages
>= 1 && num_pages
<= (IRQ_LOCAL_GUEST_MAX
+ 1));
442 assert(IMSIC_MIN_ID
<= num_ids
);
443 assert(num_ids
<= IMSIC_MAX_ID
);
444 assert((num_ids
& IMSIC_MIN_ID
) == IMSIC_MIN_ID
);
446 qdev_prop_set_bit(dev
, "mmode", mmode
);
447 qdev_prop_set_uint32(dev
, "hartid", hartid
);
448 qdev_prop_set_uint32(dev
, "num-pages", num_pages
);
449 qdev_prop_set_uint32(dev
, "num-irqs", num_ids
+ 1);
451 sysbus_realize_and_unref(SYS_BUS_DEVICE(dev
), &error_fatal
);
452 sysbus_mmio_map(SYS_BUS_DEVICE(dev
), 0, addr
);
454 for (i
= 0; i
< num_pages
; i
++) {
456 qdev_connect_gpio_out_named(dev
, NULL
, i
,
457 qdev_get_gpio_in(DEVICE(cpu
),
458 (mmode
) ? IRQ_M_EXT
: IRQ_S_EXT
));
460 qdev_connect_gpio_out_named(dev
, NULL
, i
,
461 qdev_get_gpio_in(DEVICE(cpu
),
462 IRQ_LOCAL_MAX
+ i
- 1));