/*
 * QEMU MCH/ICH9 PCI Bridge Emulation
 *
 * Copyright (c) 2006 Fabrice Bellard
 * Copyright (c) 2009, 2010, 2011
 *               Isaku Yamahata <yamahata at valinux co jp>
 *               VA Linux Systems Japan K.K.
 * Copyright (C) 2012 Jason Baron <jbaron@redhat.com>
 *
 * This is based on piix.c, but heavily modified.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "hw/pci-host/q35.h"
#include "qapi/visitor.h"

/****************************************************************************
 * Q35 host
 */
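/*
 * Realize the Q35 host bridge: register the standard PCI configuration
 * address/data I/O ports, create the PCIe root bus "pcie.0", and realize
 * the embedded MCH device on it.
 */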
static void q35_host_realize(DeviceState *dev, Error **errp)
{
    PCIHostState *pci = PCI_HOST_BRIDGE(dev);
    Q35PCIHost *s = Q35_HOST_DEVICE(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    sysbus_add_io(sbd, MCH_HOST_BRIDGE_CONFIG_ADDR, &pci->conf_mem);
    sysbus_init_ioports(sbd, MCH_HOST_BRIDGE_CONFIG_ADDR, 4);

    sysbus_add_io(sbd, MCH_HOST_BRIDGE_CONFIG_DATA, &pci->data_mem);
    sysbus_init_ioports(sbd, MCH_HOST_BRIDGE_CONFIG_DATA, 4);

    pci->bus = pci_bus_new(DEVICE(s), "pcie.0",
                           s->mch.pci_address_space, s->mch.address_space_io,
                           0, TYPE_PCIE_BUS);
    qdev_set_parent_bus(DEVICE(&s->mch), BUS(pci->bus));
    qdev_init_nofail(DEVICE(&s->mch));
}
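/*
 * Root bus path used in device paths: a shortened form is returned when
 * the "short_root_bus" compatibility property is set.
 */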
static const char *q35_host_root_bus_path(PCIHostState *host_bridge,
                                          PCIBus *rootbus)
{
    Q35PCIHost *s = Q35_HOST_DEVICE(host_bridge);

    /* For backwards compat with old device paths */
    if (s->mch.short_root_bus) {
        return "0000";
    }
    return "0000:00";
}
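/*
 * QOM property getters: report the 32-bit and 64-bit PCI hole boundaries
 * and the MMCONFIG size so machine and firmware code can query them.
 */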
static void q35_host_get_pci_hole_start(Object *obj, Visitor *v,
                                        void *opaque, const char *name,
                                        Error **errp)
{
    Q35PCIHost *s = Q35_HOST_DEVICE(obj);
    uint32_t value = s->mch.pci_info.w32.begin;

    visit_type_uint32(v, &value, name, errp);
}
static void q35_host_get_pci_hole_end(Object *obj, Visitor *v,
                                      void *opaque, const char *name,
                                      Error **errp)
{
    Q35PCIHost *s = Q35_HOST_DEVICE(obj);
    uint32_t value = s->mch.pci_info.w32.end;

    visit_type_uint32(v, &value, name, errp);
}
static void q35_host_get_pci_hole64_start(Object *obj, Visitor *v,
                                          void *opaque, const char *name,
                                          Error **errp)
{
    PCIHostState *h = PCI_HOST_BRIDGE(obj);
    Range w64;

    pci_bus_get_w64_range(h->bus, &w64);

    visit_type_uint64(v, &w64.begin, name, errp);
}
static void q35_host_get_pci_hole64_end(Object *obj, Visitor *v,
                                        void *opaque, const char *name,
                                        Error **errp)
{
    PCIHostState *h = PCI_HOST_BRIDGE(obj);
    Range w64;

    pci_bus_get_w64_range(h->bus, &w64);

    visit_type_uint64(v, &w64.end, name, errp);
}
static void q35_host_get_mmcfg_size(Object *obj, Visitor *v,
                                    void *opaque, const char *name,
                                    Error **errp)
{
    PCIExpressHost *e = PCIE_HOST_BRIDGE(obj);
    uint32_t value = e->size;

    visit_type_uint32(v, &value, name, errp);
}
static Property mch_props[] = {
    DEFINE_PROP_UINT64(PCIE_HOST_MCFG_BASE, Q35PCIHost, parent_obj.base_addr,
                       MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT),
    DEFINE_PROP_SIZE(PCI_HOST_PROP_PCI_HOLE64_SIZE, Q35PCIHost,
                     mch.pci_hole64_size, DEFAULT_PCI_HOLE64_SIZE),
    DEFINE_PROP_UINT32("short_root_bus", Q35PCIHost, mch.short_root_bus, 0),
    DEFINE_PROP_END_OF_LIST(),
};
static void q35_host_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);

    hc->root_bus_path = q35_host_root_bus_path;
    dc->realize = q35_host_realize;
    dc->props = mch_props;
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}
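/*
 * Instance init: set up the config-space I/O regions, embed the MCH PCI
 * function at devfn 00.0, expose the pci-hole and MCFG-size properties,
 * and seed the 32-bit PCI hole range.
 */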
static void q35_host_initfn(Object *obj)
{
    Q35PCIHost *s = Q35_HOST_DEVICE(obj);
    PCIHostState *phb = PCI_HOST_BRIDGE(obj);

    memory_region_init_io(&phb->conf_mem, obj, &pci_host_conf_le_ops, phb,
                          "pci-conf-idx", 4);
    memory_region_init_io(&phb->data_mem, obj, &pci_host_data_le_ops, phb,
                          "pci-conf-data", 4);

    object_initialize(&s->mch, sizeof(s->mch), TYPE_MCH_PCI_DEVICE);
    object_property_add_child(OBJECT(s), "mch", OBJECT(&s->mch), NULL);
    qdev_prop_set_uint32(DEVICE(&s->mch), "addr", PCI_DEVFN(0, 0));
    qdev_prop_set_bit(DEVICE(&s->mch), "multifunction", false);

    object_property_add(obj, PCI_HOST_PROP_PCI_HOLE_START, "int",
                        q35_host_get_pci_hole_start,
                        NULL, NULL, NULL, NULL);

    object_property_add(obj, PCI_HOST_PROP_PCI_HOLE_END, "int",
                        q35_host_get_pci_hole_end,
                        NULL, NULL, NULL, NULL);

    object_property_add(obj, PCI_HOST_PROP_PCI_HOLE64_START, "int",
                        q35_host_get_pci_hole64_start,
                        NULL, NULL, NULL, NULL);

    object_property_add(obj, PCI_HOST_PROP_PCI_HOLE64_END, "int",
                        q35_host_get_pci_hole64_end,
                        NULL, NULL, NULL, NULL);

    object_property_add(obj, PCIE_HOST_MCFG_SIZE, "int",
                        q35_host_get_mmcfg_size,
                        NULL, NULL, NULL, NULL);

    /* Leave enough space for the biggest MCFG BAR */
    /* TODO: this matches current bios behaviour, but
     * it's not a power of two, which means an MTRR
     * can't cover it exactly.
     */
    s->mch.pci_info.w32.begin = MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT +
        MCH_HOST_BRIDGE_PCIEXBAR_MAX;
    s->mch.pci_info.w32.end = IO_APIC_DEFAULT_ADDRESS;
}
static const TypeInfo q35_host_info = {
    .name          = TYPE_Q35_HOST_DEVICE,
    .parent        = TYPE_PCIE_HOST_BRIDGE,
    .instance_size = sizeof(Q35PCIHost),
    .instance_init = q35_host_initfn,
    .class_init    = q35_host_class_init,
};

/****************************************************************************
 * MCH D0:F0
 */
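/*
 * The TSEG "blackhole" is overlaid on the TSEG portion of RAM for non-SMM
 * accesses: reads return all ones and writes are discarded, so that RAM is
 * only reachable through the tseg_window alias inside the smram container.
 */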
static uint64_t tseg_blackhole_read(void *ptr, hwaddr reg, unsigned size)
{
    return 0xffffffff;
}

static void tseg_blackhole_write(void *opaque, hwaddr addr, uint64_t val,
                                 unsigned width)
{
    /* nothing */
}

static const MemoryRegionOps tseg_blackhole_ops = {
    .read = tseg_blackhole_read,
    .write = tseg_blackhole_write,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 4,
    .impl.max_access_size = 4,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
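/*
 * Decode the PCIEXBAR register: the enable bit and length field select a
 * 256/128/64 MiB MMCONFIG window, which is programmed into the PCIe host
 * bridge; the start of the 32-bit PCI hole is then moved past the window.
 */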
static void mch_update_pciexbar(MCHPCIState *mch)
{
    PCIDevice *pci_dev = PCI_DEVICE(mch);
    BusState *bus = qdev_get_parent_bus(DEVICE(mch));
    PCIExpressHost *pehb = PCIE_HOST_BRIDGE(bus->parent);

    uint64_t pciexbar;
    int enable;
    uint64_t addr;
    uint64_t addr_mask;
    uint32_t length;

    pciexbar = pci_get_quad(pci_dev->config + MCH_HOST_BRIDGE_PCIEXBAR);
    enable = pciexbar & MCH_HOST_BRIDGE_PCIEXBAREN;
    addr_mask = MCH_HOST_BRIDGE_PCIEXBAR_ADMSK;
    switch (pciexbar & MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_MASK) {
    case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_256M:
        length = 256 * 1024 * 1024;
        break;
    case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_128M:
        length = 128 * 1024 * 1024;
        addr_mask |= MCH_HOST_BRIDGE_PCIEXBAR_128ADMSK |
            MCH_HOST_BRIDGE_PCIEXBAR_64ADMSK;
        break;
    case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_64M:
        length = 64 * 1024 * 1024;
        addr_mask |= MCH_HOST_BRIDGE_PCIEXBAR_64ADMSK;
        break;
    case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_RVD:
    default:
        /* reserved length encoding: disable the window */
        enable = 0;
        length = 0;
        break;
    }
    addr = pciexbar & addr_mask;
    pcie_host_mmcfg_update(pehb, enable, addr, length);
    /* Leave enough space for the MCFG BAR */
    /*
     * TODO: this matches current bios behaviour, but it's not a power of two,
     * which means an MTRR can't cover it exactly.
     */
    if (enable) {
        mch->pci_info.w32.begin = addr + length;
    } else {
        mch->pci_info.w32.begin = MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT;
    }
}
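/*
 * Program the 13 PAM (Programmable Attribute Map) regions covering
 * 0xc0000-0xfffff; each PAM register field selects whether accesses to its
 * segment are serviced by RAM or forwarded to the PCI address space.
 */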
static void mch_update_pam(MCHPCIState *mch)
{
    PCIDevice *pd = PCI_DEVICE(mch);
    int i;

    memory_region_transaction_begin();
    for (i = 0; i < 13; i++) {
        pam_update(&mch->pam_regions[i], i,
                   pd->config[MCH_HOST_BRIDGE_PAM0 + ((i + 1) / 2)]);
    }
    memory_region_transaction_commit();
}
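/*
 * Apply the SMRAM/ESMRAMC registers: honour D_LCK by locking the registers
 * down, switch the legacy (0xa0000) and high (0xfeda0000) SMRAM views
 * according to D_OPEN/G_SMRAME/H_SMRAME, and carve the TSEG out of the top
 * of low RAM when T_EN is set.
 */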
static void mch_update_smram(MCHPCIState *mch)
{
    PCIDevice *pd = PCI_DEVICE(mch);
    bool h_smrame = (pd->config[MCH_HOST_BRIDGE_ESMRAMC] & MCH_HOST_BRIDGE_ESMRAMC_H_SMRAME);
    uint32_t tseg_size;

    /* implement SMRAM.D_LCK */
    if (pd->config[MCH_HOST_BRIDGE_SMRAM] & MCH_HOST_BRIDGE_SMRAM_D_LCK) {
        pd->config[MCH_HOST_BRIDGE_SMRAM] &= ~MCH_HOST_BRIDGE_SMRAM_D_OPEN;
        pd->wmask[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_WMASK_LCK;
        pd->wmask[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_WMASK_LCK;
    }

    memory_region_transaction_begin();

    if (pd->config[MCH_HOST_BRIDGE_SMRAM] & SMRAM_D_OPEN) {
        /* Hide (!) low SMRAM if H_SMRAME = 1 */
        memory_region_set_enabled(&mch->smram_region, h_smrame);
        /* Show high SMRAM if H_SMRAME = 1 */
        memory_region_set_enabled(&mch->open_high_smram, h_smrame);
    } else {
        /* Hide high SMRAM and low SMRAM */
        memory_region_set_enabled(&mch->smram_region, true);
        memory_region_set_enabled(&mch->open_high_smram, false);
    }

    if (pd->config[MCH_HOST_BRIDGE_SMRAM] & SMRAM_G_SMRAME) {
        memory_region_set_enabled(&mch->low_smram, !h_smrame);
        memory_region_set_enabled(&mch->high_smram, h_smrame);
    } else {
        memory_region_set_enabled(&mch->low_smram, false);
        memory_region_set_enabled(&mch->high_smram, false);
    }

    if (pd->config[MCH_HOST_BRIDGE_ESMRAMC] & MCH_HOST_BRIDGE_ESMRAMC_T_EN) {
        switch (pd->config[MCH_HOST_BRIDGE_ESMRAMC] &
                MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_MASK) {
        case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_1MB:
            tseg_size = 1024 * 1024;
            break;
        case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_2MB:
            tseg_size = 1024 * 1024 * 2;
            break;
        case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_8MB:
            tseg_size = 1024 * 1024 * 8;
            break;
        default:
            tseg_size = 0;
            break;
        }
    } else {
        tseg_size = 0;
    }

    memory_region_del_subregion(mch->system_memory, &mch->tseg_blackhole);
    memory_region_set_enabled(&mch->tseg_blackhole, tseg_size);
    memory_region_set_size(&mch->tseg_blackhole, tseg_size);
    memory_region_add_subregion_overlap(mch->system_memory,
                                        mch->below_4g_mem_size - tseg_size,
                                        &mch->tseg_blackhole, 1);

    memory_region_set_enabled(&mch->tseg_window, tseg_size);
    memory_region_set_size(&mch->tseg_window, tseg_size);
    memory_region_set_address(&mch->tseg_window,
                              mch->below_4g_mem_size - tseg_size);
    memory_region_set_alias_offset(&mch->tseg_window,
                                   mch->below_4g_mem_size - tseg_size);

    memory_region_transaction_commit();
}
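/*
 * Config-space writes: apply the write, then refresh whichever piece of
 * the memory map (PAM, PCIEXBAR/MMCONFIG, SMRAM/TSEG) the written range
 * touches.
 */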
static void mch_write_config(PCIDevice *d,
                             uint32_t address, uint32_t val, int len)
{
    MCHPCIState *mch = MCH_PCI_DEVICE(d);

    pci_default_write_config(d, address, val, len);

    if (ranges_overlap(address, len, MCH_HOST_BRIDGE_PAM0,
                       MCH_HOST_BRIDGE_PAM_SIZE)) {
        mch_update_pam(mch);
    }

    if (ranges_overlap(address, len, MCH_HOST_BRIDGE_PCIEXBAR,
                       MCH_HOST_BRIDGE_PCIEXBAR_SIZE)) {
        mch_update_pciexbar(mch);
    }

    if (ranges_overlap(address, len, MCH_HOST_BRIDGE_SMRAM,
                       MCH_HOST_BRIDGE_SMRAM_SIZE)) {
        mch_update_smram(mch);
    }
}
static void mch_update(MCHPCIState *mch)
{
    mch_update_pciexbar(mch);
    mch_update_pam(mch);
    mch_update_smram(mch);
}
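/*
 * Migration: after load, re-derive the memory map (MMCONFIG, PAM, SMRAM)
 * from the restored config space.
 */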
static int mch_post_load(void *opaque, int version_id)
{
    MCHPCIState *mch = opaque;

    mch_update(mch);
    return 0;
}
static const VMStateDescription vmstate_mch = {
    .name = "mch",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = mch_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, MCHPCIState),
        /* Used to be smm_enabled, which was basically always zero because
         * SeaBIOS hardly uses SMM.  SMRAM is now handled by CPU code.
         */
        VMSTATE_UNUSED(1),
        VMSTATE_END_OF_LIST()
    }
};
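/* Reset: restore the power-on register values and re-sync the memory map. */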
static void mch_reset(DeviceState *qdev)
{
    PCIDevice *d = PCI_DEVICE(qdev);
    MCHPCIState *mch = MCH_PCI_DEVICE(d);

    pci_set_quad(d->config + MCH_HOST_BRIDGE_PCIEXBAR,
                 MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT);

    d->config[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_DEFAULT;
    d->config[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_DEFAULT;
    d->wmask[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_WMASK;
    d->wmask[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_WMASK;

    mch_update(mch);
}
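/*
 * VT-d DMA address space lookup: lazily allocate and return the per
 * bus/devfn VTDAddressSpace so each device's DMA goes through the IOMMU
 * translation region.
 */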
static AddressSpace *q35_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    IntelIOMMUState *s = opaque;
    VTDAddressSpace **pvtd_as;
    int bus_num = pci_bus_num(bus);

    assert(0 <= bus_num && bus_num <= VTD_PCI_BUS_MAX);
    assert(0 <= devfn && devfn <= VTD_PCI_DEVFN_MAX);

    pvtd_as = s->address_spaces[bus_num];
    if (!pvtd_as) {
        /* No corresponding free() */
        pvtd_as = g_malloc0(sizeof(VTDAddressSpace *) * VTD_PCI_DEVFN_MAX);
        s->address_spaces[bus_num] = pvtd_as;
    }
    if (!pvtd_as[devfn]) {
        pvtd_as[devfn] = g_malloc0(sizeof(VTDAddressSpace));

        pvtd_as[devfn]->bus_num = (uint8_t)bus_num;
        pvtd_as[devfn]->devfn = (uint8_t)devfn;
        pvtd_as[devfn]->iommu_state = s;
        pvtd_as[devfn]->context_cache_entry.context_cache_gen = 0;
        memory_region_init_iommu(&pvtd_as[devfn]->iommu, OBJECT(s),
                                 &s->iommu_ops, "intel_iommu", UINT64_MAX);
        address_space_init(&pvtd_as[devfn]->as,
                           &pvtd_as[devfn]->iommu, "intel_iommu");
    }
    return &pvtd_as[devfn]->as;
}
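/*
 * Create the emulated Intel IOMMU (VT-d), map its register block at
 * Q35_HOST_BRIDGE_IOMMU_ADDR, and route the root bus's DMA through it.
 */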
static void mch_init_dmar(MCHPCIState *mch)
{
    PCIBus *pci_bus = PCI_BUS(qdev_get_parent_bus(DEVICE(mch)));

    mch->iommu = INTEL_IOMMU_DEVICE(qdev_create(NULL, TYPE_INTEL_IOMMU_DEVICE));
    object_property_add_child(OBJECT(mch), "intel-iommu",
                              OBJECT(mch->iommu), NULL);
    qdev_init_nofail(DEVICE(mch->iommu));
    sysbus_mmio_map(SYS_BUS_DEVICE(mch->iommu), 0, Q35_HOST_BRIDGE_IOMMU_ADDR);

    pci_setup_iommu(pci_bus, q35_host_dma_iommu, mch->iommu);
}
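/*
 * Realize the MCH function: map the PCI address space under system memory,
 * build the legacy/high SMRAM views and the TSEG blackhole/window, set up
 * the PAM regions, and instantiate VT-d when the machine asks for an IOMMU.
 */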
static void mch_realize(PCIDevice *d, Error **errp)
{
    int i;
    MCHPCIState *mch = MCH_PCI_DEVICE(d);

    /* setup pci memory mapping */
    pc_pci_as_mapping_init(OBJECT(mch), mch->system_memory,
                           mch->pci_address_space);

    /* if *disabled* show SMRAM to all CPUs */
    memory_region_init_alias(&mch->smram_region, OBJECT(mch), "smram-region",
                             mch->pci_address_space, 0xa0000, 0x20000);
    memory_region_add_subregion_overlap(mch->system_memory, 0xa0000,
                                        &mch->smram_region, 1);
    memory_region_set_enabled(&mch->smram_region, true);

    memory_region_init_alias(&mch->open_high_smram, OBJECT(mch), "smram-open-high",
                             mch->ram_memory, 0xa0000, 0x20000);
    memory_region_add_subregion_overlap(mch->system_memory, 0xfeda0000,
                                        &mch->open_high_smram, 1);
    memory_region_set_enabled(&mch->open_high_smram, false);

    /* smram, as seen by SMM CPUs */
    memory_region_init(&mch->smram, OBJECT(mch), "smram", 1ull << 32);
    memory_region_set_enabled(&mch->smram, true);
    memory_region_init_alias(&mch->low_smram, OBJECT(mch), "smram-low",
                             mch->ram_memory, 0xa0000, 0x20000);
    memory_region_set_enabled(&mch->low_smram, true);
    memory_region_add_subregion(&mch->smram, 0xa0000, &mch->low_smram);
    memory_region_init_alias(&mch->high_smram, OBJECT(mch), "smram-high",
                             mch->ram_memory, 0xa0000, 0x20000);
    memory_region_set_enabled(&mch->high_smram, true);
    memory_region_add_subregion(&mch->smram, 0xfeda0000, &mch->high_smram);

    memory_region_init_io(&mch->tseg_blackhole, OBJECT(mch),
                          &tseg_blackhole_ops, NULL,
                          "tseg-blackhole", 0);
    memory_region_set_enabled(&mch->tseg_blackhole, false);
    memory_region_add_subregion_overlap(mch->system_memory,
                                        mch->below_4g_mem_size,
                                        &mch->tseg_blackhole, 1);

    memory_region_init_alias(&mch->tseg_window, OBJECT(mch), "tseg-window",
                             mch->ram_memory, mch->below_4g_mem_size, 0);
    memory_region_set_enabled(&mch->tseg_window, false);
    memory_region_add_subregion(&mch->smram, mch->below_4g_mem_size,
                                &mch->tseg_window);

    object_property_add_const_link(qdev_get_machine(), "smram",
                                   OBJECT(&mch->smram), &error_abort);

    init_pam(DEVICE(mch), mch->ram_memory, mch->system_memory,
             mch->pci_address_space, &mch->pam_regions[0],
             PAM_BIOS_BASE, PAM_BIOS_SIZE);
    for (i = 0; i < 12; ++i) {
        init_pam(DEVICE(mch), mch->ram_memory, mch->system_memory,
                 mch->pci_address_space, &mch->pam_regions[i + 1],
                 PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE);
    }

    /* Intel IOMMU (VT-d) */
    if (machine_iommu(current_machine)) {
        mch_init_dmar(mch);
    }
}
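/* Helper for machine code: report the default MCFG base if an MCH exists. */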
uint64_t mch_mcfg_base(void)
{
    bool ambiguous;
    Object *o = object_resolve_path_type("", TYPE_MCH_PCI_DEVICE, &ambiguous);
    if (!o) {
        return 0;
    }
    return MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT;
}
static void mch_class_init(ObjectClass *klass, void *data)
{
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    k->realize = mch_realize;
    k->config_write = mch_write_config;
    dc->reset = mch_reset;
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
    dc->desc = "Host bridge";
    dc->vmsd = &vmstate_mch;
    k->vendor_id = PCI_VENDOR_ID_INTEL;
    k->device_id = PCI_DEVICE_ID_INTEL_Q35_MCH;
    k->revision = MCH_HOST_BRIDGE_REVISION_DEFAULT;
    k->class_id = PCI_CLASS_BRIDGE_HOST;
    /*
     * PCI-facing part of the host bridge, not usable without the
     * host-facing part, which can't be device_add'ed, yet.
     */
    dc->cannot_instantiate_with_device_add_yet = true;
}
static const TypeInfo mch_info = {
    .name          = TYPE_MCH_PCI_DEVICE,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(MCHPCIState),
    .class_init    = mch_class_init,
};
static void q35_register(void)
{
    type_register_static(&mch_info);
    type_register_static(&q35_host_info);
}

type_init(q35_register);