2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2011 NetApp, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * This file and its contents are supplied under the terms of the
32 * Common Development and Distribution License ("CDDL"), version 1.0.
33 * You may only use this file in accordance with the terms of version
36 * A full copy of the text of the CDDL should have accompanied this
37 * source. A copy of the CDDL is also available via the Internet at
38 * http://www.illumos.org/license/CDDL.
40 * Copyright 2014 Pluribus Networks Inc.
41 * Copyright 2018 Joyent, Inc.
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
47 #include <sys/param.h>
48 #include <sys/linker_set.h>
63 #include <machine/vmm.h>
77 #define CONF1_ADDR_PORT 0x0cf8
78 #define CONF1_DATA_PORT 0x0cfc
80 #define CONF1_ENABLE 0x80000000ul
82 #define MAXBUSES (PCI_BUSMAX + 1)
83 #define MAXSLOTS (PCI_SLOTMAX + 1)
84 #define MAXFUNCS (PCI_FUNCMAX + 1)
86 #define GB (1024 * 1024 * 1024UL)
90 struct pci_devemu
*fi_pde
;
91 struct pci_devinst
*fi_devi
;
101 struct intxinfo si_intpins
[4];
102 struct funcinfo si_funcs
[MAXFUNCS
];
106 uint16_t iobase
, iolimit
; /* I/O window */
107 uint32_t membase32
, memlimit32
; /* mmio window below 4GB */
108 uint64_t membase64
, memlimit64
; /* mmio window above 4GB */
109 struct slotinfo slotinfo
[MAXSLOTS
];
112 static struct businfo
*pci_businfo
[MAXBUSES
];
114 SET_DECLARE(pci_devemu_set
, struct pci_devemu
);
116 static uint64_t pci_emul_iobase
;
117 static uint8_t *pci_emul_rombase
;
118 static uint64_t pci_emul_romoffset
;
119 static uint8_t *pci_emul_romlim
;
120 static uint64_t pci_emul_membase32
;
121 static uint64_t pci_emul_membase64
;
122 static uint64_t pci_emul_memlim64
;
124 struct pci_bar_allocation
{
125 TAILQ_ENTRY(pci_bar_allocation
) chain
;
126 struct pci_devinst
*pdi
;
128 enum pcibar_type type
;
132 static TAILQ_HEAD(pci_bar_list
, pci_bar_allocation
) pci_bars
=
133 TAILQ_HEAD_INITIALIZER(pci_bars
);
135 #define PCI_EMUL_IOBASE 0x2000
136 #define PCI_EMUL_IOLIMIT 0x10000
138 #define PCI_EMUL_ROMSIZE 0x10000000
140 #define PCI_EMUL_ECFG_BASE 0xE0000000 /* 3.5GB */
141 #define PCI_EMUL_ECFG_SIZE (MAXBUSES * 1024 * 1024) /* 1MB per bus */
142 SYSRES_MEM(PCI_EMUL_ECFG_BASE
, PCI_EMUL_ECFG_SIZE
);
145 * OVMF always uses 0xC0000000 as base address for 32 bit PCI MMIO. Don't
146 * change this address without changing it in OVMF.
148 #define PCI_EMUL_MEMBASE32 0xC0000000
149 #define PCI_EMUL_MEMLIMIT32 PCI_EMUL_ECFG_BASE
150 #define PCI_EMUL_MEMSIZE64 (32*GB)
152 static struct pci_devemu
*pci_emul_finddev(const char *name
);
153 static void pci_lintr_route(struct pci_devinst
*pi
);
154 static void pci_lintr_update(struct pci_devinst
*pi
);
155 static void pci_cfgrw(struct vmctx
*ctx
, int in
, int bus
, int slot
,
156 int func
, int coff
, int bytes
, uint32_t *val
);
/*
 * Write 'val' into the config space of 'pi' at register offset 'coff',
 * dispatching on access width (1, 2, or anything else treated as 4 bytes).
 */
static __inline void
CFGWRITE(struct pci_devinst *pi, int coff, uint32_t val, int bytes)
{

	if (bytes == 1)
		pci_set_cfgdata8(pi, coff, val);
	else if (bytes == 2)
		pci_set_cfgdata16(pi, coff, val);
	else
		pci_set_cfgdata32(pi, coff, val);
}
/*
 * Read a 1-, 2-, or 4-byte value from the config space of 'pi' at
 * register offset 'coff'.  Any width other than 1 or 2 is treated as 4.
 */
static __inline uint32_t
CFGREAD(struct pci_devinst *pi, int coff, int bytes)
{

	if (bytes == 1)
		return (pci_get_cfgdata8(pi, coff));
	else if (bytes == 2)
		return (pci_get_cfgdata16(pi, coff));
	else
		return (pci_get_cfgdata32(pi, coff));
}
183 is_pcir_bar(int coff
)
185 return (coff
>= PCIR_BAR(0) && coff
< PCIR_BAR(PCI_BARMAX
+ 1));
189 is_pcir_bios(int coff
)
191 return (coff
>= PCIR_BIOS
&& coff
< PCIR_BIOS
+ 4);
199 * Slot options are in the form:
201 * <bus>:<slot>:<func>,<emul>[,<config>]
202 * <slot>[:<func>],<emul>[,<config>]
206 * emul is a string describing the type of PCI device e.g. virtio-net
207 * config is an optional string, depending on the device, that can be
208 * used for configuration.
/* Report a malformed -s/PCI slot option string to the user. */
static void
pci_parse_slot_usage(char *aopt)
{

	EPRINTLN("Invalid PCI slot info field \"%s\"", aopt);
}
221 * Helper function to parse a list of comma-separated options where
222 * each option is formatted as "name[=value]". If no value is
223 * provided, the option is treated as a boolean and is given a value
227 pci_parse_legacy_config(nvlist_t
*nvl
, const char *opt
)
229 char *config
, *name
, *tofree
, *value
;
234 config
= tofree
= strdup(opt
);
235 while ((name
= strsep(&config
, ",")) != NULL
) {
236 value
= strchr(name
, '=');
240 set_config_value_node(nvl
, name
, value
);
242 set_config_bool_node(nvl
, name
, true);
249 * PCI device configuration is stored in MIBs that encode the device's
252 * pci.<bus>.<slot>.<func>
254 * Where "bus", "slot", and "func" are all decimal values without
255 * leading zeroes. Each valid device must have a "device" node which
256 * identifies the driver model of the device.
258 * Device backends can provide a parser for the "config" string. If
259 * a custom parser is not provided, pci_parse_legacy_config() is used
260 * to parse the string.
263 pci_parse_slot(char *opt
)
265 char node_name
[sizeof("pci.XXX.XX.X")];
266 struct pci_devemu
*pde
;
267 char *emul
, *config
, *str
, *cp
;
268 int error
, bnum
, snum
, fnum
;
274 emul
= config
= NULL
;
275 if ((cp
= strchr(str
, ',')) != NULL
) {
278 if ((cp
= strchr(emul
, ',')) != NULL
) {
283 pci_parse_slot_usage(opt
);
287 /* <bus>:<slot>:<func> */
288 if (sscanf(str
, "%d:%d:%d", &bnum
, &snum
, &fnum
) != 3) {
291 if (sscanf(str
, "%d:%d", &snum
, &fnum
) != 2) {
294 if (sscanf(str
, "%d", &snum
) != 1) {
300 if (bnum
< 0 || bnum
>= MAXBUSES
|| snum
< 0 || snum
>= MAXSLOTS
||
301 fnum
< 0 || fnum
>= MAXFUNCS
) {
302 pci_parse_slot_usage(opt
);
306 pde
= pci_emul_finddev(emul
);
308 EPRINTLN("pci slot %d:%d:%d: unknown device \"%s\"", bnum
, snum
,
313 snprintf(node_name
, sizeof(node_name
), "pci.%d.%d.%d", bnum
, snum
,
315 nvl
= find_config_node(node_name
);
317 EPRINTLN("pci slot %d:%d:%d already occupied!", bnum
, snum
,
321 nvl
= create_config_node(node_name
);
322 if (pde
->pe_alias
!= NULL
)
323 set_config_value_node(nvl
, "device", pde
->pe_alias
);
325 set_config_value_node(nvl
, "device", pde
->pe_emu
);
327 if (pde
->pe_legacy_config
!= NULL
)
328 error
= pde
->pe_legacy_config(nvl
, config
);
330 error
= pci_parse_legacy_config(nvl
, config
);
337 pci_print_supported_devices(void)
339 struct pci_devemu
**pdpp
, *pdp
;
341 SET_FOREACH(pdpp
, pci_devemu_set
) {
343 printf("%s\n", pdp
->pe_emu
);
348 pci_valid_pba_offset(struct pci_devinst
*pi
, uint64_t offset
)
351 if (offset
< pi
->pi_msix
.pba_offset
)
354 if (offset
>= pi
->pi_msix
.pba_offset
+ pi
->pi_msix
.pba_size
) {
362 pci_emul_msix_twrite(struct pci_devinst
*pi
, uint64_t offset
, int size
,
365 int msix_entry_offset
;
369 /* support only 4 or 8 byte writes */
370 if (size
!= 4 && size
!= 8)
374 * Return if table index is beyond what device supports
376 tab_index
= offset
/ MSIX_TABLE_ENTRY_SIZE
;
377 if (tab_index
>= pi
->pi_msix
.table_count
)
380 msix_entry_offset
= offset
% MSIX_TABLE_ENTRY_SIZE
;
382 /* support only aligned writes */
383 if ((msix_entry_offset
% size
) != 0)
386 dest
= (char *)(pi
->pi_msix
.table
+ tab_index
);
387 dest
+= msix_entry_offset
;
390 *((uint32_t *)dest
) = value
;
392 *((uint64_t *)dest
) = value
;
398 pci_emul_msix_tread(struct pci_devinst
*pi
, uint64_t offset
, int size
)
401 int msix_entry_offset
;
403 uint64_t retval
= ~0;
406 * The PCI standard only allows 4 and 8 byte accesses to the MSI-X
407 * table but we also allow 1 byte access to accommodate reads from
410 if (size
!= 1 && size
!= 4 && size
!= 8)
413 msix_entry_offset
= offset
% MSIX_TABLE_ENTRY_SIZE
;
415 /* support only aligned reads */
416 if ((msix_entry_offset
% size
) != 0) {
420 tab_index
= offset
/ MSIX_TABLE_ENTRY_SIZE
;
422 if (tab_index
< pi
->pi_msix
.table_count
) {
423 /* valid MSI-X Table access */
424 dest
= (char *)(pi
->pi_msix
.table
+ tab_index
);
425 dest
+= msix_entry_offset
;
428 retval
= *((uint8_t *)dest
);
430 retval
= *((uint32_t *)dest
);
432 retval
= *((uint64_t *)dest
);
433 } else if (pci_valid_pba_offset(pi
, offset
)) {
434 /* return 0 for PBA access */
442 pci_msix_table_bar(struct pci_devinst
*pi
)
445 if (pi
->pi_msix
.table
!= NULL
)
446 return (pi
->pi_msix
.table_bar
);
452 pci_msix_pba_bar(struct pci_devinst
*pi
)
455 if (pi
->pi_msix
.table
!= NULL
)
456 return (pi
->pi_msix
.pba_bar
);
462 pci_emul_io_handler(struct vmctx
*ctx
, int in
, int port
,
463 int bytes
, uint32_t *eax
, void *arg
)
465 struct pci_devinst
*pdi
= arg
;
466 struct pci_devemu
*pe
= pdi
->pi_d
;
472 for (i
= 0; i
<= PCI_BARMAX
; i
++) {
473 if (pdi
->pi_bar
[i
].type
== PCIBAR_IO
&&
474 (uint64_t)port
>= pdi
->pi_bar
[i
].addr
&&
475 (uint64_t)port
+ bytes
<=
476 pdi
->pi_bar
[i
].addr
+ pdi
->pi_bar
[i
].size
) {
477 offset
= port
- pdi
->pi_bar
[i
].addr
;
479 *eax
= (*pe
->pe_barread
)(ctx
, pdi
, i
,
482 (*pe
->pe_barwrite
)(ctx
, pdi
, i
, offset
,
491 pci_emul_mem_handler(struct vmctx
*ctx
, int vcpu __unused
, int dir
,
492 uint64_t addr
, int size
, uint64_t *val
, void *arg1
, long arg2
)
494 struct pci_devinst
*pdi
= arg1
;
495 struct pci_devemu
*pe
= pdi
->pi_d
;
497 int bidx
= (int) arg2
;
499 assert(bidx
<= PCI_BARMAX
);
500 assert(pdi
->pi_bar
[bidx
].type
== PCIBAR_MEM32
||
501 pdi
->pi_bar
[bidx
].type
== PCIBAR_MEM64
);
502 assert(addr
>= pdi
->pi_bar
[bidx
].addr
&&
503 addr
+ size
<= pdi
->pi_bar
[bidx
].addr
+ pdi
->pi_bar
[bidx
].size
);
505 offset
= addr
- pdi
->pi_bar
[bidx
].addr
;
507 if (dir
== MEM_F_WRITE
) {
509 (*pe
->pe_barwrite
)(ctx
, pdi
, bidx
, offset
,
510 4, *val
& 0xffffffff);
511 (*pe
->pe_barwrite
)(ctx
, pdi
, bidx
, offset
+ 4,
514 (*pe
->pe_barwrite
)(ctx
, pdi
, bidx
, offset
,
519 *val
= (*pe
->pe_barread
)(ctx
, pdi
, bidx
,
521 *val
|= (*pe
->pe_barread
)(ctx
, pdi
, bidx
,
522 offset
+ 4, 4) << 32;
524 *val
= (*pe
->pe_barread
)(ctx
, pdi
, bidx
,
/*
 * Carve a naturally aligned, power-of-2-sized region out of the space
 * tracked by '*baseptr' (bounded above by 'limit').  On success the
 * allocated base is returned via '*addr' and '*baseptr' is advanced.
 * Returns 0 on success, -1 if the region would exceed 'limit'.
 */
static int
pci_emul_alloc_resource(uint64_t *baseptr, uint64_t limit, uint64_t size,
    uint64_t *addr)
{
	uint64_t base;

	assert((size & (size - 1)) == 0);	/* must be a power of 2 */

	base = roundup2(*baseptr, size);

	if (base + size <= limit) {
		*addr = base;
		*baseptr = base + size;
		return (0);
	} else
		return (-1);
}
552 * Register (or unregister) the MMIO or I/O region associated with the BAR
553 * register 'idx' of an emulated pci device.
556 modify_bar_registration(struct pci_devinst
*pi
, int idx
, int registration
)
558 struct pci_devemu
*pe
;
560 struct inout_port iop
;
564 switch (pi
->pi_bar
[idx
].type
) {
566 bzero(&iop
, sizeof(struct inout_port
));
567 iop
.name
= pi
->pi_name
;
568 iop
.port
= pi
->pi_bar
[idx
].addr
;
569 iop
.size
= pi
->pi_bar
[idx
].size
;
571 iop
.flags
= IOPORT_F_INOUT
;
572 iop
.handler
= pci_emul_io_handler
;
574 error
= register_inout(&iop
);
576 error
= unregister_inout(&iop
);
577 if (pe
->pe_baraddr
!= NULL
)
578 (*pe
->pe_baraddr
)(pi
->pi_vmctx
, pi
, idx
, registration
,
579 pi
->pi_bar
[idx
].addr
);
583 bzero(&mr
, sizeof(struct mem_range
));
584 mr
.name
= pi
->pi_name
;
585 mr
.base
= pi
->pi_bar
[idx
].addr
;
586 mr
.size
= pi
->pi_bar
[idx
].size
;
589 mr
.handler
= pci_emul_mem_handler
;
592 error
= register_mem(&mr
);
594 error
= unregister_mem(&mr
);
595 if (pe
->pe_baraddr
!= NULL
)
596 (*pe
->pe_baraddr
)(pi
->pi_vmctx
, pi
, idx
, registration
,
597 pi
->pi_bar
[idx
].addr
);
601 if (pe
->pe_baraddr
!= NULL
)
602 (*pe
->pe_baraddr
)(pi
->pi_vmctx
, pi
, idx
, registration
,
603 pi
->pi_bar
[idx
].addr
);
/* Stop decoding accesses to BAR 'idx' of 'pi'. */
static void
unregister_bar(struct pci_devinst *pi, int idx)
{

	modify_bar_registration(pi, idx, 0);
}
/* Start decoding accesses to BAR 'idx' of 'pi'. */
static void
register_bar(struct pci_devinst *pi, int idx)
{

	modify_bar_registration(pi, idx, 1);
}
626 /* Is the ROM enabled for the emulated pci device? */
628 romen(struct pci_devinst
*pi
)
630 return (pi
->pi_bar
[PCI_ROM_IDX
].lobits
& PCIM_BIOS_ENABLE
) ==
634 /* Are we decoding i/o port accesses for the emulated pci device? */
636 porten(struct pci_devinst
*pi
)
640 cmd
= pci_get_cfgdata16(pi
, PCIR_COMMAND
);
642 return (cmd
& PCIM_CMD_PORTEN
);
645 /* Are we decoding memory accesses for the emulated pci device? */
647 memen(struct pci_devinst
*pi
)
651 cmd
= pci_get_cfgdata16(pi
, PCIR_COMMAND
);
653 return (cmd
& PCIM_CMD_MEMEN
);
657 * Update the MMIO or I/O address that is decoded by the BAR register.
659 * If the pci device has enabled the address space decoding then intercept
660 * the address range decoded by the BAR register.
663 update_bar_address(struct pci_devinst
*pi
, uint64_t addr
, int idx
, int type
)
667 if (pi
->pi_bar
[idx
].type
== PCIBAR_IO
)
673 unregister_bar(pi
, idx
);
678 pi
->pi_bar
[idx
].addr
= addr
;
681 pi
->pi_bar
[idx
].addr
&= ~0xffffffffUL
;
682 pi
->pi_bar
[idx
].addr
|= addr
;
685 pi
->pi_bar
[idx
].addr
&= 0xffffffff;
686 pi
->pi_bar
[idx
].addr
|= addr
;
693 register_bar(pi
, idx
);
697 pci_emul_alloc_bar(struct pci_devinst
*pdi
, int idx
, enum pcibar_type type
,
700 assert((type
== PCIBAR_ROM
) || (idx
>= 0 && idx
<= PCI_BARMAX
));
701 assert((type
!= PCIBAR_ROM
) || (idx
== PCI_ROM_IDX
));
703 if ((size
& (size
- 1)) != 0)
704 size
= 1UL << flsl(size
); /* round up to a power of 2 */
706 /* Enforce minimum BAR sizes required by the PCI standard */
707 if (type
== PCIBAR_IO
) {
710 } else if (type
== PCIBAR_ROM
) {
711 if (size
< ~PCIM_BIOS_ADDR_MASK
+ 1)
712 size
= ~PCIM_BIOS_ADDR_MASK
+ 1;
719 * To reduce fragmentation of the MMIO space, we allocate the BARs by
720 * size. Therefore, don't allocate the BAR yet. We create a list of all
721 * BAR allocation which is sorted by BAR size. When all PCI devices are
722 * initialized, we will assign an address to the BARs.
725 /* create a new list entry */
726 struct pci_bar_allocation
*const new_bar
= malloc(sizeof(*new_bar
));
727 memset(new_bar
, 0, sizeof(*new_bar
));
730 new_bar
->type
= type
;
731 new_bar
->size
= size
;
734 * Search for a BAR which size is lower than the size of our newly
737 struct pci_bar_allocation
*bar
= NULL
;
738 TAILQ_FOREACH(bar
, &pci_bars
, chain
) {
739 if (bar
->size
< size
) {
746 * Either the list is empty or new BAR is the smallest BAR of
747 * the list. Append it to the end of our list.
749 TAILQ_INSERT_TAIL(&pci_bars
, new_bar
, chain
);
752 * The found BAR is smaller than our new BAR. For that reason,
753 * insert our new BAR before the found BAR.
755 TAILQ_INSERT_BEFORE(bar
, new_bar
, chain
);
759 * pci_passthru devices synchronize their physical and virtual command
760 * register on init. For that reason, the virtual cmd reg should be
761 * updated as early as possible.
766 enbit
= PCIM_CMD_PORTEN
;
770 enbit
= PCIM_CMD_MEMEN
;
777 const uint16_t cmd
= pci_get_cfgdata16(pdi
, PCIR_COMMAND
);
778 pci_set_cfgdata16(pdi
, PCIR_COMMAND
, cmd
| enbit
);
784 pci_emul_assign_bar(struct pci_devinst
*const pdi
, const int idx
,
785 const enum pcibar_type type
, const uint64_t size
)
788 uint64_t *baseptr
, limit
, addr
, mask
, lobits
, bar
;
793 addr
= mask
= lobits
= 0;
796 baseptr
= &pci_emul_iobase
;
797 limit
= PCI_EMUL_IOLIMIT
;
798 mask
= PCIM_BAR_IO_BASE
;
799 lobits
= PCIM_BAR_IO_SPACE
;
804 * Some drivers do not work well if the 64-bit BAR is allocated
805 * above 4GB. Allow for this by allocating small requests under
806 * 4GB unless then allocation size is larger than some arbitrary
807 * number (128MB currently).
809 if (size
> 128 * 1024 * 1024) {
810 baseptr
= &pci_emul_membase64
;
811 limit
= pci_emul_memlim64
;
812 mask
= PCIM_BAR_MEM_BASE
;
813 lobits
= PCIM_BAR_MEM_SPACE
| PCIM_BAR_MEM_64
|
814 PCIM_BAR_MEM_PREFETCH
;
816 baseptr
= &pci_emul_membase32
;
817 limit
= PCI_EMUL_MEMLIMIT32
;
818 mask
= PCIM_BAR_MEM_BASE
;
819 lobits
= PCIM_BAR_MEM_SPACE
| PCIM_BAR_MEM_64
;
823 baseptr
= &pci_emul_membase32
;
824 limit
= PCI_EMUL_MEMLIMIT32
;
825 mask
= PCIM_BAR_MEM_BASE
;
826 lobits
= PCIM_BAR_MEM_SPACE
| PCIM_BAR_MEM_32
;
829 /* do not claim memory for ROM. OVMF will do it for us. */
832 mask
= PCIM_BIOS_ADDR_MASK
;
836 printf("pci_emul_alloc_base: invalid bar type %d\n", type
);
844 if (baseptr
!= NULL
) {
845 error
= pci_emul_alloc_resource(baseptr
, limit
, size
, &addr
);
852 pdi
->pi_bar
[idx
].type
= type
;
853 pdi
->pi_bar
[idx
].addr
= addr
;
854 pdi
->pi_bar
[idx
].size
= size
;
856 * passthru devices are using same lobits as physical device they set
859 if (pdi
->pi_bar
[idx
].lobits
!= 0) {
860 lobits
= pdi
->pi_bar
[idx
].lobits
;
862 pdi
->pi_bar
[idx
].lobits
= lobits
;
865 /* Initialize the BAR register in config space */
866 bar
= (addr
& mask
) | lobits
;
867 pci_set_cfgdata32(pdi
, PCIR_BAR(idx
), bar
);
869 if (type
== PCIBAR_MEM64
) {
870 assert(idx
+ 1 <= PCI_BARMAX
);
871 pdi
->pi_bar
[idx
+ 1].type
= PCIBAR_MEMHI64
;
872 pci_set_cfgdata32(pdi
, PCIR_BAR(idx
+ 1), bar
>> 32);
875 if (type
!= PCIBAR_ROM
) {
876 register_bar(pdi
, idx
);
883 pci_emul_alloc_rom(struct pci_devinst
*const pdi
, const uint64_t size
,
886 /* allocate ROM space once on first call */
887 if (pci_emul_rombase
== 0) {
888 pci_emul_rombase
= vm_create_devmem(pdi
->pi_vmctx
, VM_PCIROM
,
889 "pcirom", PCI_EMUL_ROMSIZE
);
890 if (pci_emul_rombase
== MAP_FAILED
) {
891 warnx("%s: failed to create rom segment", __func__
);
894 pci_emul_romlim
= pci_emul_rombase
+ PCI_EMUL_ROMSIZE
;
895 pci_emul_romoffset
= 0;
898 /* ROM size should be a power of 2 and greater than 2 KB */
899 const uint64_t rom_size
= MAX(1UL << flsl(size
),
900 ~PCIM_BIOS_ADDR_MASK
+ 1);
902 /* check if ROM fits into ROM space */
903 if (pci_emul_romoffset
+ rom_size
> PCI_EMUL_ROMSIZE
) {
904 warnx("%s: no space left in rom segment:", __func__
);
905 warnx("%16lu bytes left",
906 PCI_EMUL_ROMSIZE
- pci_emul_romoffset
);
907 warnx("%16lu bytes required by %d/%d/%d", rom_size
, pdi
->pi_bus
,
908 pdi
->pi_slot
, pdi
->pi_func
);
912 /* allocate ROM BAR */
913 const int error
= pci_emul_alloc_bar(pdi
, PCI_ROM_IDX
, PCIBAR_ROM
,
919 *addr
= pci_emul_rombase
+ pci_emul_romoffset
;
921 /* save offset into ROM Space */
922 pdi
->pi_romoffset
= pci_emul_romoffset
;
924 /* increase offset for next ROM */
925 pci_emul_romoffset
+= rom_size
;
930 #define CAP_START_OFFSET 0x40
932 pci_emul_add_capability(struct pci_devinst
*pi
, u_char
*capdata
, int caplen
)
934 int i
, capoff
, reallen
;
939 reallen
= roundup2(caplen
, 4); /* dword aligned */
941 sts
= pci_get_cfgdata16(pi
, PCIR_STATUS
);
942 if ((sts
& PCIM_STATUS_CAPPRESENT
) == 0)
943 capoff
= CAP_START_OFFSET
;
945 capoff
= pi
->pi_capend
+ 1;
947 /* Check if we have enough space */
948 if (capoff
+ reallen
> PCI_REGMAX
+ 1)
951 /* Set the previous capability pointer */
952 if ((sts
& PCIM_STATUS_CAPPRESENT
) == 0) {
953 pci_set_cfgdata8(pi
, PCIR_CAP_PTR
, capoff
);
954 pci_set_cfgdata16(pi
, PCIR_STATUS
, sts
|PCIM_STATUS_CAPPRESENT
);
956 pci_set_cfgdata8(pi
, pi
->pi_prevcap
+ 1, capoff
);
958 /* Copy the capability */
959 for (i
= 0; i
< caplen
; i
++)
960 pci_set_cfgdata8(pi
, capoff
+ i
, capdata
[i
]);
962 /* Set the next capability pointer */
963 pci_set_cfgdata8(pi
, capoff
+ 1, 0);
965 pi
->pi_prevcap
= capoff
;
966 pi
->pi_capend
= capoff
+ reallen
- 1;
970 static struct pci_devemu
*
971 pci_emul_finddev(const char *name
)
973 struct pci_devemu
**pdpp
, *pdp
;
975 SET_FOREACH(pdpp
, pci_devemu_set
) {
977 if (!strcmp(pdp
->pe_emu
, name
)) {
986 pci_emul_init(struct vmctx
*ctx
, struct pci_devemu
*pde
, int bus
, int slot
,
987 int func
, struct funcinfo
*fi
)
989 struct pci_devinst
*pdi
;
992 pdi
= calloc(1, sizeof(struct pci_devinst
));
998 pthread_mutex_init(&pdi
->pi_lintr
.lock
, NULL
);
999 pdi
->pi_lintr
.pin
= 0;
1000 pdi
->pi_lintr
.state
= IDLE
;
1001 pdi
->pi_lintr
.pirq_pin
= 0;
1002 pdi
->pi_lintr
.ioapic_irq
= 0;
1004 snprintf(pdi
->pi_name
, PI_NAMESZ
, "%s-pci-%d", pde
->pe_emu
, slot
);
1006 /* Disable legacy interrupts */
1007 pci_set_cfgdata8(pdi
, PCIR_INTLINE
, 255);
1008 pci_set_cfgdata8(pdi
, PCIR_INTPIN
, 0);
1010 pci_set_cfgdata8(pdi
, PCIR_COMMAND
, PCIM_CMD_BUSMASTEREN
);
1012 err
= (*pde
->pe_init
)(ctx
, pdi
, fi
->fi_config
);
1022 pci_populate_msicap(struct msicap
*msicap
, int msgnum
, int nextptr
)
1026 /* Number of msi messages must be a power of 2 between 1 and 32 */
1027 assert((msgnum
& (msgnum
- 1)) == 0 && msgnum
>= 1 && msgnum
<= 32);
1028 mmc
= ffs(msgnum
) - 1;
1030 bzero(msicap
, sizeof(struct msicap
));
1031 msicap
->capid
= PCIY_MSI
;
1032 msicap
->nextptr
= nextptr
;
1033 msicap
->msgctrl
= PCIM_MSICTRL_64BIT
| (mmc
<< 1);
1037 pci_emul_add_msicap(struct pci_devinst
*pi
, int msgnum
)
1039 struct msicap msicap
;
1041 pci_populate_msicap(&msicap
, msgnum
, 0);
1043 return (pci_emul_add_capability(pi
, (u_char
*)&msicap
, sizeof(msicap
)));
1047 pci_populate_msixcap(struct msixcap
*msixcap
, int msgnum
, int barnum
,
1048 uint32_t msix_tab_size
)
1051 assert(msix_tab_size
% 4096 == 0);
1053 bzero(msixcap
, sizeof(struct msixcap
));
1054 msixcap
->capid
= PCIY_MSIX
;
1057 * Message Control Register, all fields set to
1058 * zero except for the Table Size.
1059 * Note: Table size N is encoded as N-1
1061 msixcap
->msgctrl
= msgnum
- 1;
1065 * - MSI-X table start at offset 0
1066 * - PBA table starts at a 4K aligned offset after the MSI-X table
1068 msixcap
->table_info
= barnum
& PCIM_MSIX_BIR_MASK
;
1069 msixcap
->pba_info
= msix_tab_size
| (barnum
& PCIM_MSIX_BIR_MASK
);
1073 pci_msix_table_init(struct pci_devinst
*pi
, int table_entries
)
1077 assert(table_entries
> 0);
1078 assert(table_entries
<= MAX_MSIX_TABLE_ENTRIES
);
1080 table_size
= table_entries
* MSIX_TABLE_ENTRY_SIZE
;
1081 pi
->pi_msix
.table
= calloc(1, table_size
);
1083 /* set mask bit of vector control register */
1084 for (i
= 0; i
< table_entries
; i
++)
1085 pi
->pi_msix
.table
[i
].vector_control
|= PCIM_MSIX_VCTRL_MASK
;
1089 pci_emul_add_msixcap(struct pci_devinst
*pi
, int msgnum
, int barnum
)
1092 struct msixcap msixcap
;
1094 assert(msgnum
>= 1 && msgnum
<= MAX_MSIX_TABLE_ENTRIES
);
1095 assert(barnum
>= 0 && barnum
<= PCIR_MAX_BAR_0
);
1097 tab_size
= msgnum
* MSIX_TABLE_ENTRY_SIZE
;
1099 /* Align table size to nearest 4K */
1100 tab_size
= roundup2(tab_size
, 4096);
1102 pi
->pi_msix
.table_bar
= barnum
;
1103 pi
->pi_msix
.pba_bar
= barnum
;
1104 pi
->pi_msix
.table_offset
= 0;
1105 pi
->pi_msix
.table_count
= msgnum
;
1106 pi
->pi_msix
.pba_offset
= tab_size
;
1107 pi
->pi_msix
.pba_size
= PBA_SIZE(msgnum
);
1109 pci_msix_table_init(pi
, msgnum
);
1111 pci_populate_msixcap(&msixcap
, msgnum
, barnum
, tab_size
);
1113 /* allocate memory for MSI-X Table and PBA */
1114 pci_emul_alloc_bar(pi
, barnum
, PCIBAR_MEM32
,
1115 tab_size
+ pi
->pi_msix
.pba_size
);
1117 return (pci_emul_add_capability(pi
, (u_char
*)&msixcap
,
1122 msixcap_cfgwrite(struct pci_devinst
*pi
, int capoff
, int offset
,
1123 int bytes
, uint32_t val
)
1125 uint16_t msgctrl
, rwmask
;
1128 off
= offset
- capoff
;
1129 /* Message Control Register */
1130 if (off
== 2 && bytes
== 2) {
1131 rwmask
= PCIM_MSIXCTRL_MSIX_ENABLE
| PCIM_MSIXCTRL_FUNCTION_MASK
;
1132 msgctrl
= pci_get_cfgdata16(pi
, offset
);
1134 msgctrl
|= val
& rwmask
;
1137 pi
->pi_msix
.enabled
= val
& PCIM_MSIXCTRL_MSIX_ENABLE
;
1138 pi
->pi_msix
.function_mask
= val
& PCIM_MSIXCTRL_FUNCTION_MASK
;
1139 pci_lintr_update(pi
);
1142 CFGWRITE(pi
, offset
, val
, bytes
);
1146 msicap_cfgwrite(struct pci_devinst
*pi
, int capoff
, int offset
,
1147 int bytes
, uint32_t val
)
1149 uint16_t msgctrl
, rwmask
, msgdata
, mme
;
1153 * If guest is writing to the message control register make sure
1154 * we do not overwrite read-only fields.
1156 if ((offset
- capoff
) == 2 && bytes
== 2) {
1157 rwmask
= PCIM_MSICTRL_MME_MASK
| PCIM_MSICTRL_MSI_ENABLE
;
1158 msgctrl
= pci_get_cfgdata16(pi
, offset
);
1160 msgctrl
|= val
& rwmask
;
1163 CFGWRITE(pi
, offset
, val
, bytes
);
1165 msgctrl
= pci_get_cfgdata16(pi
, capoff
+ 2);
1166 addrlo
= pci_get_cfgdata32(pi
, capoff
+ 4);
1167 if (msgctrl
& PCIM_MSICTRL_64BIT
)
1168 msgdata
= pci_get_cfgdata16(pi
, capoff
+ 12);
1170 msgdata
= pci_get_cfgdata16(pi
, capoff
+ 8);
1172 mme
= msgctrl
& PCIM_MSICTRL_MME_MASK
;
1173 pi
->pi_msi
.enabled
= msgctrl
& PCIM_MSICTRL_MSI_ENABLE
? 1 : 0;
1174 if (pi
->pi_msi
.enabled
) {
1175 pi
->pi_msi
.addr
= addrlo
;
1176 pi
->pi_msi
.msg_data
= msgdata
;
1177 pi
->pi_msi
.maxmsgnum
= 1 << (mme
>> 4);
1179 pi
->pi_msi
.maxmsgnum
= 0;
1181 pci_lintr_update(pi
);
1185 pciecap_cfgwrite(struct pci_devinst
*pi
, int capoff __unused
, int offset
,
1186 int bytes
, uint32_t val
)
1189 /* XXX don't write to the readonly parts */
1190 CFGWRITE(pi
, offset
, val
, bytes
);
1193 #define PCIECAP_VERSION 0x2
1195 pci_emul_add_pciecap(struct pci_devinst
*pi
, int type
)
1198 struct pciecap pciecap
;
1200 bzero(&pciecap
, sizeof(pciecap
));
1203 * Use the integrated endpoint type for endpoints on a root complex bus.
1205 * NB: bhyve currently only supports a single PCI bus that is the root
1206 * complex bus, so all endpoints are integrated.
1208 if ((type
== PCIEM_TYPE_ENDPOINT
) && (pi
->pi_bus
== 0))
1209 type
= PCIEM_TYPE_ROOT_INT_EP
;
1211 pciecap
.capid
= PCIY_EXPRESS
;
1212 pciecap
.pcie_capabilities
= PCIECAP_VERSION
| type
;
1213 if (type
!= PCIEM_TYPE_ROOT_INT_EP
) {
1214 pciecap
.link_capabilities
= 0x411; /* gen1, x1 */
1215 pciecap
.link_status
= 0x11; /* gen1, x1 */
1218 err
= pci_emul_add_capability(pi
, (u_char
*)&pciecap
, sizeof(pciecap
));
1223 * This function assumes that 'coff' is in the capabilities region of the
1224 * config space. A capoff parameter of zero will force a search for the
1228 pci_emul_capwrite(struct pci_devinst
*pi
, int offset
, int bytes
, uint32_t val
,
1229 uint8_t capoff
, int capid
)
1233 /* Do not allow un-aligned writes */
1234 if ((offset
& (bytes
- 1)) != 0)
1238 /* Find the capability that we want to update */
1239 capoff
= CAP_START_OFFSET
;
1241 nextoff
= pci_get_cfgdata8(pi
, capoff
+ 1);
1244 if (offset
>= capoff
&& offset
< nextoff
)
1249 assert(offset
>= capoff
);
1250 capid
= pci_get_cfgdata8(pi
, capoff
);
1254 * Capability ID and Next Capability Pointer are readonly.
1255 * However, some o/s's do 4-byte writes that include these.
1256 * For this case, trim the write back to 2 bytes and adjust
1259 if (offset
== capoff
|| offset
== capoff
+ 1) {
1260 if (offset
== capoff
&& bytes
== 4) {
1270 msicap_cfgwrite(pi
, capoff
, offset
, bytes
, val
);
1273 msixcap_cfgwrite(pi
, capoff
, offset
, bytes
, val
);
1276 pciecap_cfgwrite(pi
, capoff
, offset
, bytes
, val
);
1284 pci_emul_iscap(struct pci_devinst
*pi
, int offset
)
1288 sts
= pci_get_cfgdata16(pi
, PCIR_STATUS
);
1289 if ((sts
& PCIM_STATUS_CAPPRESENT
) != 0) {
1290 if (offset
>= CAP_START_OFFSET
&& offset
<= pi
->pi_capend
)
1297 pci_emul_fallback_handler(struct vmctx
*ctx __unused
, int vcpu __unused
,
1298 int dir
, uint64_t addr __unused
, int size __unused
, uint64_t *val
,
1299 void *arg1 __unused
, long arg2 __unused
)
1302 * Ignore writes; return 0xff's for reads. The mem read code
1303 * will take care of truncating to the correct size.
1305 if (dir
== MEM_F_READ
) {
1306 *val
= 0xffffffffffffffff;
1313 pci_emul_ecfg_handler(struct vmctx
*ctx
, int vcpu __unused
, int dir
,
1314 uint64_t addr
, int bytes
, uint64_t *val
, void *arg1 __unused
,
1317 int bus
, slot
, func
, coff
, in
;
1319 coff
= addr
& 0xfff;
1320 func
= (addr
>> 12) & 0x7;
1321 slot
= (addr
>> 15) & 0x1f;
1322 bus
= (addr
>> 20) & 0xff;
1323 in
= (dir
== MEM_F_READ
);
1326 pci_cfgrw(ctx
, in
, bus
, slot
, func
, coff
, bytes
, (uint32_t *)val
);
1334 return (PCI_EMUL_ECFG_BASE
);
1337 #define BUSIO_ROUNDUP 32
1338 #define BUSMEM32_ROUNDUP (1024 * 1024)
1339 #define BUSMEM64_ROUNDUP (512 * 1024 * 1024)
1342 init_pci(struct vmctx
*ctx
)
1344 char node_name
[sizeof("pci.XXX.XX.X")];
1345 struct mem_range mr
;
1346 struct pci_devemu
*pde
;
1348 struct slotinfo
*si
;
1349 struct funcinfo
*fi
;
1353 int bus
, slot
, func
;
1356 if (vm_get_lowmem_limit(ctx
) > PCI_EMUL_MEMBASE32
)
1357 errx(EX_OSERR
, "Invalid lowmem limit");
1359 pci_emul_iobase
= PCI_EMUL_IOBASE
;
1360 pci_emul_membase32
= PCI_EMUL_MEMBASE32
;
1362 pci_emul_membase64
= 4*GB
+ vm_get_highmem_size(ctx
);
1363 pci_emul_membase64
= roundup2(pci_emul_membase64
, PCI_EMUL_MEMSIZE64
);
1364 pci_emul_memlim64
= pci_emul_membase64
+ PCI_EMUL_MEMSIZE64
;
1366 for (bus
= 0; bus
< MAXBUSES
; bus
++) {
1367 snprintf(node_name
, sizeof(node_name
), "pci.%d", bus
);
1368 nvl
= find_config_node(node_name
);
1371 pci_businfo
[bus
] = calloc(1, sizeof(struct businfo
));
1372 bi
= pci_businfo
[bus
];
1375 * Keep track of the i/o and memory resources allocated to
1378 bi
->iobase
= pci_emul_iobase
;
1379 bi
->membase32
= pci_emul_membase32
;
1380 bi
->membase64
= pci_emul_membase64
;
1382 /* first run: init devices */
1383 for (slot
= 0; slot
< MAXSLOTS
; slot
++) {
1384 si
= &bi
->slotinfo
[slot
];
1385 for (func
= 0; func
< MAXFUNCS
; func
++) {
1386 fi
= &si
->si_funcs
[func
];
1387 snprintf(node_name
, sizeof(node_name
),
1388 "pci.%d.%d.%d", bus
, slot
, func
);
1389 nvl
= find_config_node(node_name
);
1393 fi
->fi_config
= nvl
;
1394 emul
= get_config_value_node(nvl
, "device");
1396 EPRINTLN("pci slot %d:%d:%d: missing "
1397 "\"device\" value", bus
, slot
, func
);
1400 pde
= pci_emul_finddev(emul
);
1402 EPRINTLN("pci slot %d:%d:%d: unknown "
1403 "device \"%s\"", bus
, slot
, func
,
1407 if (pde
->pe_alias
!= NULL
) {
1408 EPRINTLN("pci slot %d:%d:%d: legacy "
1409 "device \"%s\", use \"%s\" instead",
1410 bus
, slot
, func
, emul
,
1415 error
= pci_emul_init(ctx
, pde
, bus
, slot
,
1422 /* second run: assign BARs and free list */
1423 struct pci_bar_allocation
*bar
;
1424 struct pci_bar_allocation
*bar_tmp
;
1425 TAILQ_FOREACH_SAFE(bar
, &pci_bars
, chain
, bar_tmp
) {
1426 pci_emul_assign_bar(bar
->pdi
, bar
->idx
, bar
->type
,
1430 TAILQ_INIT(&pci_bars
);
1433 * Add some slop to the I/O and memory resources decoded by
1434 * this bus to give a guest some flexibility if it wants to
1435 * reprogram the BARs.
1437 pci_emul_iobase
+= BUSIO_ROUNDUP
;
1438 pci_emul_iobase
= roundup2(pci_emul_iobase
, BUSIO_ROUNDUP
);
1439 bi
->iolimit
= pci_emul_iobase
;
1441 pci_emul_membase32
+= BUSMEM32_ROUNDUP
;
1442 pci_emul_membase32
= roundup2(pci_emul_membase32
,
1444 bi
->memlimit32
= pci_emul_membase32
;
1446 pci_emul_membase64
+= BUSMEM64_ROUNDUP
;
1447 pci_emul_membase64
= roundup2(pci_emul_membase64
,
1449 bi
->memlimit64
= pci_emul_membase64
;
1453 * PCI backends are initialized before routing INTx interrupts
1454 * so that LPC devices are able to reserve ISA IRQs before
1455 * routing PIRQ pins.
1457 for (bus
= 0; bus
< MAXBUSES
; bus
++) {
1458 if ((bi
= pci_businfo
[bus
]) == NULL
)
1461 for (slot
= 0; slot
< MAXSLOTS
; slot
++) {
1462 si
= &bi
->slotinfo
[slot
];
1463 for (func
= 0; func
< MAXFUNCS
; func
++) {
1464 fi
= &si
->si_funcs
[func
];
1465 if (fi
->fi_devi
== NULL
)
1467 pci_lintr_route(fi
->fi_devi
);
1474 * The guest physical memory map looks like the following:
1475 * [0, lowmem) guest system memory
1476 * [lowmem, 0xC0000000) memory hole (may be absent)
1477 * [0xC0000000, 0xE0000000) PCI hole (32-bit BAR allocation)
1478 * [0xE0000000, 0xF0000000) PCI extended config window
1479 * [0xF0000000, 4GB) LAPIC, IOAPIC, HPET, firmware
1480 * [4GB, 4GB + highmem)
1484 * Accesses to memory addresses that are not allocated to system
1485 * memory or PCI devices return 0xff's.
1487 lowmem
= vm_get_lowmem_size(ctx
);
1488 bzero(&mr
, sizeof(struct mem_range
));
1489 mr
.name
= "PCI hole";
1490 mr
.flags
= MEM_F_RW
| MEM_F_IMMUTABLE
;
1492 mr
.size
= (4ULL * 1024 * 1024 * 1024) - lowmem
;
1493 mr
.handler
= pci_emul_fallback_handler
;
1494 error
= register_mem_fallback(&mr
);
1497 /* PCI extended config space */
1498 bzero(&mr
, sizeof(struct mem_range
));
1499 mr
.name
= "PCI ECFG";
1500 mr
.flags
= MEM_F_RW
| MEM_F_IMMUTABLE
;
1501 mr
.base
= PCI_EMUL_ECFG_BASE
;
1502 mr
.size
= PCI_EMUL_ECFG_SIZE
;
1503 mr
.handler
= pci_emul_ecfg_handler
;
1504 error
= register_mem(&mr
);
1511 pci_apic_prt_entry(int bus __unused
, int slot
, int pin
, int pirq_pin __unused
,
1512 int ioapic_irq
, void *arg __unused
)
1515 dsdt_line(" Package ()");
1517 dsdt_line(" 0x%X,", slot
<< 16 | 0xffff);
1518 dsdt_line(" 0x%02X,", pin
- 1);
1519 dsdt_line(" Zero,");
1520 dsdt_line(" 0x%X", ioapic_irq
);
1525 pci_pirq_prt_entry(int bus __unused
, int slot
, int pin
, int pirq_pin
,
1526 int ioapic_irq __unused
, void *arg __unused
)
1530 name
= lpc_pirq_name(pirq_pin
);
1533 dsdt_line(" Package ()");
1535 dsdt_line(" 0x%X,", slot
<< 16 | 0xffff);
1536 dsdt_line(" 0x%02X,", pin
- 1);
1537 dsdt_line(" %s,", name
);
1544 * A bhyve virtual machine has a flat PCI hierarchy with a root port
1545 * corresponding to each PCI bus.
1548 pci_bus_write_dsdt(int bus
)
1551 struct slotinfo
*si
;
1552 struct pci_devinst
*pi
;
1553 int count
, func
, slot
;
1556 * If there are no devices on this 'bus' then just return.
1558 if ((bi
= pci_businfo
[bus
]) == NULL
) {
1560 * Bus 0 is special because it decodes the I/O ports used
1561 * for PCI config space access even if there are no devices
1568 dsdt_line(" Device (PC%02X)", bus
);
1570 dsdt_line(" Name (_HID, EisaId (\"PNP0A03\"))");
1572 dsdt_line(" Method (_BBN, 0, NotSerialized)");
1574 dsdt_line(" Return (0x%08X)", bus
);
1576 dsdt_line(" Name (_CRS, ResourceTemplate ()");
1578 dsdt_line(" WordBusNumber (ResourceProducer, MinFixed, "
1579 "MaxFixed, PosDecode,");
1580 dsdt_line(" 0x0000, // Granularity");
1581 dsdt_line(" 0x%04X, // Range Minimum", bus
);
1582 dsdt_line(" 0x%04X, // Range Maximum", bus
);
1583 dsdt_line(" 0x0000, // Translation Offset");
1584 dsdt_line(" 0x0001, // Length");
1589 dsdt_fixed_ioport(0xCF8, 8);
1592 dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, "
1593 "PosDecode, EntireRange,");
1594 dsdt_line(" 0x0000, // Granularity");
1595 dsdt_line(" 0x0000, // Range Minimum");
1596 dsdt_line(" 0x0CF7, // Range Maximum");
1597 dsdt_line(" 0x0000, // Translation Offset");
1598 dsdt_line(" 0x0CF8, // Length");
1599 dsdt_line(" ,, , TypeStatic)");
1601 dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, "
1602 "PosDecode, EntireRange,");
1603 dsdt_line(" 0x0000, // Granularity");
1604 dsdt_line(" 0x0D00, // Range Minimum");
1605 dsdt_line(" 0x%04X, // Range Maximum",
1606 PCI_EMUL_IOBASE
- 1);
1607 dsdt_line(" 0x0000, // Translation Offset");
1608 dsdt_line(" 0x%04X, // Length",
1609 PCI_EMUL_IOBASE
- 0x0D00);
1610 dsdt_line(" ,, , TypeStatic)");
1620 dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, "
1621 "PosDecode, EntireRange,");
1622 dsdt_line(" 0x0000, // Granularity");
1623 dsdt_line(" 0x%04X, // Range Minimum", bi
->iobase
);
1624 dsdt_line(" 0x%04X, // Range Maximum",
1626 dsdt_line(" 0x0000, // Translation Offset");
1627 dsdt_line(" 0x%04X, // Length",
1628 bi
->iolimit
- bi
->iobase
);
1629 dsdt_line(" ,, , TypeStatic)");
1631 /* mmio window (32-bit) */
1632 dsdt_line(" DWordMemory (ResourceProducer, PosDecode, "
1633 "MinFixed, MaxFixed, NonCacheable, ReadWrite,");
1634 dsdt_line(" 0x00000000, // Granularity");
1635 dsdt_line(" 0x%08X, // Range Minimum\n", bi
->membase32
);
1636 dsdt_line(" 0x%08X, // Range Maximum\n",
1637 bi
->memlimit32
- 1);
1638 dsdt_line(" 0x00000000, // Translation Offset");
1639 dsdt_line(" 0x%08X, // Length\n",
1640 bi
->memlimit32
- bi
->membase32
);
1641 dsdt_line(" ,, , AddressRangeMemory, TypeStatic)");
1643 /* mmio window (64-bit) */
1644 dsdt_line(" QWordMemory (ResourceProducer, PosDecode, "
1645 "MinFixed, MaxFixed, NonCacheable, ReadWrite,");
1646 dsdt_line(" 0x0000000000000000, // Granularity");
1647 dsdt_line(" 0x%016lX, // Range Minimum\n", bi
->membase64
);
1648 dsdt_line(" 0x%016lX, // Range Maximum\n",
1649 bi
->memlimit64
- 1);
1650 dsdt_line(" 0x0000000000000000, // Translation Offset");
1651 dsdt_line(" 0x%016lX, // Length\n",
1652 bi
->memlimit64
- bi
->membase64
);
1653 dsdt_line(" ,, , AddressRangeMemory, TypeStatic)");
1656 count
= pci_count_lintr(bus
);
1659 dsdt_line("Name (PPRT, Package ()");
1661 pci_walk_lintr(bus
, pci_pirq_prt_entry
, NULL
);
1663 dsdt_line("Name (APRT, Package ()");
1665 pci_walk_lintr(bus
, pci_apic_prt_entry
, NULL
);
1667 dsdt_line("Method (_PRT, 0, NotSerialized)");
1669 dsdt_line(" If (PICM)");
1671 dsdt_line(" Return (APRT)");
1675 dsdt_line(" Return (PPRT)");
1682 for (slot
= 0; slot
< MAXSLOTS
; slot
++) {
1683 si
= &bi
->slotinfo
[slot
];
1684 for (func
= 0; func
< MAXFUNCS
; func
++) {
1685 pi
= si
->si_funcs
[func
].fi_devi
;
1686 if (pi
!= NULL
&& pi
->pi_d
->pe_write_dsdt
!= NULL
)
1687 pi
->pi_d
->pe_write_dsdt(pi
);
1696 pci_write_dsdt(void)
1701 dsdt_line("Name (PICM, 0x00)");
1702 dsdt_line("Method (_PIC, 1, NotSerialized)");
1704 dsdt_line(" Store (Arg0, PICM)");
1707 dsdt_line("Scope (_SB)");
1709 for (bus
= 0; bus
< MAXBUSES
; bus
++)
1710 pci_bus_write_dsdt(bus
);
1716 pci_bus_configured(int bus
)
1718 assert(bus
>= 0 && bus
< MAXBUSES
);
1719 return (pci_businfo
[bus
] != NULL
);
1723 pci_msi_enabled(struct pci_devinst
*pi
)
1725 return (pi
->pi_msi
.enabled
);
1729 pci_msi_maxmsgnum(struct pci_devinst
*pi
)
1731 if (pi
->pi_msi
.enabled
)
1732 return (pi
->pi_msi
.maxmsgnum
);
1738 pci_msix_enabled(struct pci_devinst
*pi
)
1741 return (pi
->pi_msix
.enabled
&& !pi
->pi_msi
.enabled
);
1745 pci_generate_msix(struct pci_devinst
*pi
, int index
)
1747 struct msix_table_entry
*mte
;
1749 if (!pci_msix_enabled(pi
))
1752 if (pi
->pi_msix
.function_mask
)
1755 if (index
>= pi
->pi_msix
.table_count
)
1758 mte
= &pi
->pi_msix
.table
[index
];
1759 if ((mte
->vector_control
& PCIM_MSIX_VCTRL_MASK
) == 0) {
1760 /* XXX Set PBA bit if interrupt is disabled */
1761 vm_lapic_msi(pi
->pi_vmctx
, mte
->addr
, mte
->msg_data
);
1766 pci_generate_msi(struct pci_devinst
*pi
, int index
)
1769 if (pci_msi_enabled(pi
) && index
< pci_msi_maxmsgnum(pi
)) {
1770 vm_lapic_msi(pi
->pi_vmctx
, pi
->pi_msi
.addr
,
1771 pi
->pi_msi
.msg_data
+ index
);
1776 pci_lintr_permitted(struct pci_devinst
*pi
)
1780 cmd
= pci_get_cfgdata16(pi
, PCIR_COMMAND
);
1781 return (!(pi
->pi_msi
.enabled
|| pi
->pi_msix
.enabled
||
1782 (cmd
& PCIM_CMD_INTxDIS
)));
1786 pci_lintr_request(struct pci_devinst
*pi
)
1789 struct slotinfo
*si
;
1790 int bestpin
, bestcount
, pin
;
1792 bi
= pci_businfo
[pi
->pi_bus
];
1796 * Just allocate a pin from our slot. The pin will be
1797 * assigned IRQs later when interrupts are routed.
1799 si
= &bi
->slotinfo
[pi
->pi_slot
];
1801 bestcount
= si
->si_intpins
[0].ii_count
;
1802 for (pin
= 1; pin
< 4; pin
++) {
1803 if (si
->si_intpins
[pin
].ii_count
< bestcount
) {
1805 bestcount
= si
->si_intpins
[pin
].ii_count
;
1809 si
->si_intpins
[bestpin
].ii_count
++;
1810 pi
->pi_lintr
.pin
= bestpin
+ 1;
1811 pci_set_cfgdata8(pi
, PCIR_INTPIN
, bestpin
+ 1);
1815 pci_lintr_route(struct pci_devinst
*pi
)
1818 struct intxinfo
*ii
;
1820 if (pi
->pi_lintr
.pin
== 0)
1823 bi
= pci_businfo
[pi
->pi_bus
];
1825 ii
= &bi
->slotinfo
[pi
->pi_slot
].si_intpins
[pi
->pi_lintr
.pin
- 1];
1828 * Attempt to allocate an I/O APIC pin for this intpin if one
1829 * is not yet assigned.
1831 if (ii
->ii_ioapic_irq
== 0)
1832 ii
->ii_ioapic_irq
= ioapic_pci_alloc_irq(pi
);
1833 assert(ii
->ii_ioapic_irq
> 0);
1836 * Attempt to allocate a PIRQ pin for this intpin if one is
1839 if (ii
->ii_pirq_pin
== 0)
1840 ii
->ii_pirq_pin
= pirq_alloc_pin(pi
);
1841 assert(ii
->ii_pirq_pin
> 0);
1843 pi
->pi_lintr
.ioapic_irq
= ii
->ii_ioapic_irq
;
1844 pi
->pi_lintr
.pirq_pin
= ii
->ii_pirq_pin
;
1845 pci_set_cfgdata8(pi
, PCIR_INTLINE
, pirq_irq(ii
->ii_pirq_pin
));
1849 pci_lintr_assert(struct pci_devinst
*pi
)
1852 assert(pi
->pi_lintr
.pin
> 0);
1854 pthread_mutex_lock(&pi
->pi_lintr
.lock
);
1855 if (pi
->pi_lintr
.state
== IDLE
) {
1856 if (pci_lintr_permitted(pi
)) {
1857 pi
->pi_lintr
.state
= ASSERTED
;
1860 pi
->pi_lintr
.state
= PENDING
;
1862 pthread_mutex_unlock(&pi
->pi_lintr
.lock
);
1866 pci_lintr_deassert(struct pci_devinst
*pi
)
1869 assert(pi
->pi_lintr
.pin
> 0);
1871 pthread_mutex_lock(&pi
->pi_lintr
.lock
);
1872 if (pi
->pi_lintr
.state
== ASSERTED
) {
1873 pi
->pi_lintr
.state
= IDLE
;
1874 pci_irq_deassert(pi
);
1875 } else if (pi
->pi_lintr
.state
== PENDING
)
1876 pi
->pi_lintr
.state
= IDLE
;
1877 pthread_mutex_unlock(&pi
->pi_lintr
.lock
);
1881 pci_lintr_update(struct pci_devinst
*pi
)
1884 pthread_mutex_lock(&pi
->pi_lintr
.lock
);
1885 if (pi
->pi_lintr
.state
== ASSERTED
&& !pci_lintr_permitted(pi
)) {
1886 pci_irq_deassert(pi
);
1887 pi
->pi_lintr
.state
= PENDING
;
1888 } else if (pi
->pi_lintr
.state
== PENDING
&& pci_lintr_permitted(pi
)) {
1889 pi
->pi_lintr
.state
= ASSERTED
;
1892 pthread_mutex_unlock(&pi
->pi_lintr
.lock
);
1894 if (pi
->pi_d
->pe_lintrupdate
!= NULL
) {
1895 pi
->pi_d
->pe_lintrupdate(pi
);
1897 #endif /* __FreeBSD__ */
1901 pci_count_lintr(int bus
)
1903 int count
, slot
, pin
;
1904 struct slotinfo
*slotinfo
;
1907 if (pci_businfo
[bus
] != NULL
) {
1908 for (slot
= 0; slot
< MAXSLOTS
; slot
++) {
1909 slotinfo
= &pci_businfo
[bus
]->slotinfo
[slot
];
1910 for (pin
= 0; pin
< 4; pin
++) {
1911 if (slotinfo
->si_intpins
[pin
].ii_count
!= 0)
1920 pci_walk_lintr(int bus
, pci_lintr_cb cb
, void *arg
)
1923 struct slotinfo
*si
;
1924 struct intxinfo
*ii
;
1927 if ((bi
= pci_businfo
[bus
]) == NULL
)
1930 for (slot
= 0; slot
< MAXSLOTS
; slot
++) {
1931 si
= &bi
->slotinfo
[slot
];
1932 for (pin
= 0; pin
< 4; pin
++) {
1933 ii
= &si
->si_intpins
[pin
];
1934 if (ii
->ii_count
!= 0)
1935 cb(bus
, slot
, pin
+ 1, ii
->ii_pirq_pin
,
1936 ii
->ii_ioapic_irq
, arg
);
1942 * Return 1 if the emulated device in 'slot' is a multi-function device.
1943 * Return 0 otherwise.
1946 pci_emul_is_mfdev(int bus
, int slot
)
1949 struct slotinfo
*si
;
1953 if ((bi
= pci_businfo
[bus
]) != NULL
) {
1954 si
= &bi
->slotinfo
[slot
];
1955 for (f
= 0; f
< MAXFUNCS
; f
++) {
1956 if (si
->si_funcs
[f
].fi_devi
!= NULL
) {
1961 return (numfuncs
> 1);
1965 * Ensure that the PCIM_MFDEV bit is properly set (or unset) depending on
1966 * whether or not is a multi-function being emulated in the pci 'slot'.
1969 pci_emul_hdrtype_fixup(int bus
, int slot
, int off
, int bytes
, uint32_t *rv
)
1973 if (off
<= PCIR_HDRTYPE
&& off
+ bytes
> PCIR_HDRTYPE
) {
1974 mfdev
= pci_emul_is_mfdev(bus
, slot
);
1984 *rv
&= ~(PCIM_MFDEV
<< 16);
1986 *rv
|= (PCIM_MFDEV
<< 16);
1994 * Update device state in response to changes to the PCI command
1998 pci_emul_cmd_changed(struct pci_devinst
*pi
, uint16_t old
)
2001 uint16_t changed
, new;
2003 new = pci_get_cfgdata16(pi
, PCIR_COMMAND
);
2004 changed
= old
^ new;
2007 * If the MMIO or I/O address space decoding has changed then
2008 * register/unregister all BARs that decode that address space.
2010 for (i
= 0; i
<= PCI_BARMAX_WITH_ROM
; i
++) {
2011 switch (pi
->pi_bar
[i
].type
) {
2013 case PCIBAR_MEMHI64
:
2016 /* I/O address space decoding changed? */
2017 if (changed
& PCIM_CMD_PORTEN
) {
2018 if (new & PCIM_CMD_PORTEN
)
2019 register_bar(pi
, i
);
2021 unregister_bar(pi
, i
);
2025 /* skip (un-)register of ROM if it disabled */
2031 /* MMIO address space decoding changed? */
2032 if (changed
& PCIM_CMD_MEMEN
) {
2033 if (new & PCIM_CMD_MEMEN
)
2034 register_bar(pi
, i
);
2036 unregister_bar(pi
, i
);
2045 * If INTx has been unmasked and is pending, assert the
2048 pci_lintr_update(pi
);
2052 pci_emul_cmdsts_write(struct pci_devinst
*pi
, int coff
, uint32_t new, int bytes
)
2055 uint32_t cmd
, old
, readonly
;
2057 cmd
= pci_get_cfgdata16(pi
, PCIR_COMMAND
); /* stash old value */
2060 * From PCI Local Bus Specification 3.0 sections 6.2.2 and 6.2.3.
2062 * XXX Bits 8, 11, 12, 13, 14 and 15 in the status register are
2063 * 'write 1 to clear'. However these bits are not set to '1' by
2064 * any device emulation so it is simpler to treat them as readonly.
2066 rshift
= (coff
& 0x3) * 8;
2067 readonly
= 0xFFFFF880 >> rshift
;
2069 old
= CFGREAD(pi
, coff
, bytes
);
2071 new |= (old
& readonly
);
2072 CFGWRITE(pi
, coff
, new, bytes
); /* update config */
2074 pci_emul_cmd_changed(pi
, cmd
);
2078 pci_cfgrw(struct vmctx
*ctx
, int in
, int bus
, int slot
, int func
,
2079 int coff
, int bytes
, uint32_t *eax
)
2082 struct slotinfo
*si
;
2083 struct pci_devinst
*pi
;
2084 struct pci_devemu
*pe
;
2086 uint64_t addr
, mask
;
2089 if ((bi
= pci_businfo
[bus
]) != NULL
) {
2090 si
= &bi
->slotinfo
[slot
];
2091 pi
= si
->si_funcs
[func
].fi_devi
;
2096 * Just return if there is no device at this slot:func or if the
2097 * the guest is doing an un-aligned access.
2099 if (pi
== NULL
|| (bytes
!= 1 && bytes
!= 2 && bytes
!= 4) ||
2100 (coff
& (bytes
- 1)) != 0) {
2107 * Ignore all writes beyond the standard config space and return all
2110 if (coff
>= PCI_REGMAX
+ 1) {
2114 * Extended capabilities begin at offset 256 in config
2115 * space. Absence of extended capabilities is signaled
2116 * with all 0s in the extended capability header at
2119 if (coff
<= PCI_REGMAX
+ 4)
2131 /* Let the device emulation override the default handler */
2132 if (pe
->pe_cfgread
!= NULL
) {
2133 needcfg
= pe
->pe_cfgread(ctx
, pi
, coff
, bytes
, eax
);
2139 *eax
= CFGREAD(pi
, coff
, bytes
);
2141 pci_emul_hdrtype_fixup(bus
, slot
, coff
, bytes
, eax
);
2143 /* Let the device emulation override the default handler */
2144 if (pe
->pe_cfgwrite
!= NULL
&&
2145 (*pe
->pe_cfgwrite
)(ctx
, pi
, coff
, bytes
, *eax
) == 0)
2149 * Special handling for write to BAR and ROM registers
2151 if (is_pcir_bar(coff
) || is_pcir_bios(coff
)) {
2153 * Ignore writes to BAR registers that are not
2156 if (bytes
!= 4 || (coff
& 0x3) != 0)
2159 if (is_pcir_bar(coff
)) {
2160 idx
= (coff
- PCIR_BAR(0)) / 4;
2161 } else if (is_pcir_bios(coff
)) {
2164 errx(4, "%s: invalid BAR offset %d", __func__
,
2168 mask
= ~(pi
->pi_bar
[idx
].size
- 1);
2169 switch (pi
->pi_bar
[idx
].type
) {
2171 pi
->pi_bar
[idx
].addr
= bar
= 0;
2176 bar
= addr
| pi
->pi_bar
[idx
].lobits
;
2178 * Register the new BAR value for interception
2180 if (addr
!= pi
->pi_bar
[idx
].addr
) {
2181 update_bar_address(pi
, addr
, idx
,
2186 addr
= bar
= *eax
& mask
;
2187 bar
|= pi
->pi_bar
[idx
].lobits
;
2188 if (addr
!= pi
->pi_bar
[idx
].addr
) {
2189 update_bar_address(pi
, addr
, idx
,
2194 addr
= bar
= *eax
& mask
;
2195 bar
|= pi
->pi_bar
[idx
].lobits
;
2196 if (addr
!= (uint32_t)pi
->pi_bar
[idx
].addr
) {
2197 update_bar_address(pi
, addr
, idx
,
2201 case PCIBAR_MEMHI64
:
2202 mask
= ~(pi
->pi_bar
[idx
- 1].size
- 1);
2203 addr
= ((uint64_t)*eax
<< 32) & mask
;
2205 if (bar
!= pi
->pi_bar
[idx
- 1].addr
>> 32) {
2206 update_bar_address(pi
, addr
, idx
- 1,
2211 addr
= bar
= *eax
& mask
;
2212 if (memen(pi
) && romen(pi
)) {
2213 unregister_bar(pi
, idx
);
2215 pi
->pi_bar
[idx
].addr
= addr
;
2216 pi
->pi_bar
[idx
].lobits
= *eax
&
2218 /* romen could have changed it value */
2219 if (memen(pi
) && romen(pi
)) {
2220 register_bar(pi
, idx
);
2222 bar
|= pi
->pi_bar
[idx
].lobits
;
2227 pci_set_cfgdata32(pi
, coff
, bar
);
2229 } else if (pci_emul_iscap(pi
, coff
)) {
2230 pci_emul_capwrite(pi
, coff
, bytes
, *eax
, 0, 0);
2231 } else if (coff
>= PCIR_COMMAND
&& coff
< PCIR_REVID
) {
2232 pci_emul_cmdsts_write(pi
, coff
, *eax
, bytes
);
2234 CFGWRITE(pi
, coff
, *eax
, bytes
);
/* Latched state of the 0xCF8 config-address register (CONF1 mechanism). */
static int cfgenable, cfgbus, cfgslot, cfgfunc, cfgoff;
2242 pci_emul_cfgaddr(struct vmctx
*ctx __unused
, int in
,
2243 int port __unused
, int bytes
, uint32_t *eax
, void *arg __unused
)
2249 *eax
= (bytes
== 2) ? 0xffff : 0xff;
2254 x
= (cfgbus
<< 16) | (cfgslot
<< 11) | (cfgfunc
<< 8) | cfgoff
;
2260 cfgenable
= (x
& CONF1_ENABLE
) == CONF1_ENABLE
;
2261 cfgoff
= (x
& PCI_REGMAX
) & ~0x03;
2262 cfgfunc
= (x
>> 8) & PCI_FUNCMAX
;
2263 cfgslot
= (x
>> 11) & PCI_SLOTMAX
;
2264 cfgbus
= (x
>> 16) & PCI_BUSMAX
;
2269 INOUT_PORT(pci_cfgaddr
, CONF1_ADDR_PORT
, IOPORT_F_INOUT
, pci_emul_cfgaddr
);
2272 pci_emul_cfgdata(struct vmctx
*ctx
, int in
, int port
,
2273 int bytes
, uint32_t *eax
, void *arg __unused
)
2277 assert(bytes
== 1 || bytes
== 2 || bytes
== 4);
2279 coff
= cfgoff
+ (port
- CONF1_DATA_PORT
);
2281 pci_cfgrw(ctx
, in
, cfgbus
, cfgslot
, cfgfunc
, coff
, bytes
,
2284 /* Ignore accesses to cfgdata if not enabled by cfgaddr */
2291 INOUT_PORT(pci_cfgdata
, CONF1_DATA_PORT
+0, IOPORT_F_INOUT
, pci_emul_cfgdata
);
2292 INOUT_PORT(pci_cfgdata
, CONF1_DATA_PORT
+1, IOPORT_F_INOUT
, pci_emul_cfgdata
);
2293 INOUT_PORT(pci_cfgdata
, CONF1_DATA_PORT
+2, IOPORT_F_INOUT
, pci_emul_cfgdata
);
2294 INOUT_PORT(pci_cfgdata
, CONF1_DATA_PORT
+3, IOPORT_F_INOUT
, pci_emul_cfgdata
);
2296 #define PCI_EMUL_TEST
2297 #ifdef PCI_EMUL_TEST
2299 * Define a dummy test device
2303 struct pci_emul_dsoftc
{
2304 uint8_t ioregs
[DIOSZ
];
2305 uint8_t memregs
[2][DMEMSZ
];
2308 #define PCI_EMUL_MSI_MSGS 4
2309 #define PCI_EMUL_MSIX_MSGS 16
2312 pci_emul_dinit(struct vmctx
*ctx __unused
, struct pci_devinst
*pi
,
2313 nvlist_t
*nvl __unused
)
2316 struct pci_emul_dsoftc
*sc
;
2318 sc
= calloc(1, sizeof(struct pci_emul_dsoftc
));
2322 pci_set_cfgdata16(pi
, PCIR_DEVICE
, 0x0001);
2323 pci_set_cfgdata16(pi
, PCIR_VENDOR
, 0x10DD);
2324 pci_set_cfgdata8(pi
, PCIR_CLASS
, 0x02);
2326 error
= pci_emul_add_msicap(pi
, PCI_EMUL_MSI_MSGS
);
2329 error
= pci_emul_alloc_bar(pi
, 0, PCIBAR_IO
, DIOSZ
);
2332 error
= pci_emul_alloc_bar(pi
, 1, PCIBAR_MEM32
, DMEMSZ
);
2335 error
= pci_emul_alloc_bar(pi
, 2, PCIBAR_MEM32
, DMEMSZ
);
2342 pci_emul_diow(struct vmctx
*ctx __unused
,
2343 struct pci_devinst
*pi
, int baridx
, uint64_t offset
, int size
,
2347 struct pci_emul_dsoftc
*sc
= pi
->pi_arg
;
2350 if (offset
+ size
> DIOSZ
) {
2351 printf("diow: iow too large, offset %ld size %d\n",
2357 sc
->ioregs
[offset
] = value
& 0xff;
2358 } else if (size
== 2) {
2359 *(uint16_t *)&sc
->ioregs
[offset
] = value
& 0xffff;
2360 } else if (size
== 4) {
2361 *(uint32_t *)&sc
->ioregs
[offset
] = value
;
2363 printf("diow: iow unknown size %d\n", size
);
2367 * Special magic value to generate an interrupt
2369 if (offset
== 4 && size
== 4 && pci_msi_enabled(pi
))
2370 pci_generate_msi(pi
, value
% pci_msi_maxmsgnum(pi
));
2372 if (value
== 0xabcdef) {
2373 for (i
= 0; i
< pci_msi_maxmsgnum(pi
); i
++)
2374 pci_generate_msi(pi
, i
);
2378 if (baridx
== 1 || baridx
== 2) {
2379 if (offset
+ size
> DMEMSZ
) {
2380 printf("diow: memw too large, offset %ld size %d\n",
2385 i
= baridx
- 1; /* 'memregs' index */
2388 sc
->memregs
[i
][offset
] = value
;
2389 } else if (size
== 2) {
2390 *(uint16_t *)&sc
->memregs
[i
][offset
] = value
;
2391 } else if (size
== 4) {
2392 *(uint32_t *)&sc
->memregs
[i
][offset
] = value
;
2393 } else if (size
== 8) {
2394 *(uint64_t *)&sc
->memregs
[i
][offset
] = value
;
2396 printf("diow: memw unknown size %d\n", size
);
2400 * magic interrupt ??
2404 if (baridx
> 2 || baridx
< 0) {
2405 printf("diow: unknown bar idx %d\n", baridx
);
2410 pci_emul_dior(struct vmctx
*ctx __unused
,
2411 struct pci_devinst
*pi
, int baridx
, uint64_t offset
, int size
)
2413 struct pci_emul_dsoftc
*sc
= pi
->pi_arg
;
2419 if (offset
+ size
> DIOSZ
) {
2420 printf("dior: ior too large, offset %ld size %d\n",
2427 value
= sc
->ioregs
[offset
];
2428 } else if (size
== 2) {
2429 value
= *(uint16_t *) &sc
->ioregs
[offset
];
2430 } else if (size
== 4) {
2431 value
= *(uint32_t *) &sc
->ioregs
[offset
];
2433 printf("dior: ior unknown size %d\n", size
);
2437 if (baridx
== 1 || baridx
== 2) {
2438 if (offset
+ size
> DMEMSZ
) {
2439 printf("dior: memr too large, offset %ld size %d\n",
2444 i
= baridx
- 1; /* 'memregs' index */
2447 value
= sc
->memregs
[i
][offset
];
2448 } else if (size
== 2) {
2449 value
= *(uint16_t *) &sc
->memregs
[i
][offset
];
2450 } else if (size
== 4) {
2451 value
= *(uint32_t *) &sc
->memregs
[i
][offset
];
2452 } else if (size
== 8) {
2453 value
= *(uint64_t *) &sc
->memregs
[i
][offset
];
2455 printf("dior: ior unknown size %d\n", size
);
2460 if (baridx
> 2 || baridx
< 0) {
2461 printf("dior: unknown bar idx %d\n", baridx
);
2468 static const struct pci_devemu pci_dummy
= {
2470 .pe_init
= pci_emul_dinit
,
2471 .pe_barwrite
= pci_emul_diow
,
2472 .pe_barread
= pci_emul_dior
,
2474 PCI_EMUL_SET(pci_dummy
);
2476 #endif /* PCI_EMUL_TEST */