/*
 * QEMU INTEL 82574 GbE NIC emulation
 *
 * Software developer's manuals:
 * http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf
 *
 * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Leonid Bloch <leonid@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * Based on work done by:
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "net/net.h"
#include "net/tap.h"
#include "qemu/range.h"
#include "sysemu/sysemu.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"

#include "hw/net/e1000_regs.h"

#include "e1000x_common.h"
#include "e1000e_core.h"

#include "trace.h"
#include "qapi/error.h"

#define TYPE_E1000E "e1000e"
#define E1000E(obj) OBJECT_CHECK(E1000EState, (obj), TYPE_E1000E)

typedef struct E1000EState {
    PCIDevice parent_obj;
    NICState *nic;
    NICConf conf;

    MemoryRegion mmio;
    MemoryRegion flash;
    MemoryRegion io;
    MemoryRegion msix;

    uint32_t ioaddr;

    uint16_t subsys_ven;
    uint16_t subsys;

    uint16_t subsys_ven_used;
    uint16_t subsys_used;

    bool disable_vnet;

    E1000ECore core;
} E1000EState;

#define E1000E_MMIO_IDX     0
#define E1000E_FLASH_IDX    1
#define E1000E_IO_IDX       2
#define E1000E_MSIX_IDX     3

#define E1000E_MMIO_SIZE    (128 * 1024)
#define E1000E_FLASH_SIZE   (128 * 1024)
#define E1000E_IO_SIZE      (32)
#define E1000E_MSIX_SIZE    (16 * 1024)

#define E1000E_MSIX_TABLE   (0x0000)
#define E1000E_MSIX_PBA     (0x2000)

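/*
 * BAR layout implied by the definitions above: BAR0 is the 128 KiB register
 * MMIO window, BAR1 a 128 KiB (dummy) flash window, BAR2 a 32-byte I/O port
 * window, and BAR3 a 16 KiB region holding the MSI-X table at offset 0x0000
 * and the pending bit array at offset 0x2000.
 */
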
static uint64_t
e1000e_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    E1000EState *s = opaque;
    return e1000e_core_read(&s->core, addr, size);
}

static void
e1000e_mmio_write(void *opaque, hwaddr addr,
                  uint64_t val, unsigned size)
{
    E1000EState *s = opaque;
    e1000e_core_write(&s->core, addr, val, size);
}

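/*
 * The I/O BAR exposes the usual e1000-family indirect access pair: software
 * writes a register offset to IOADDR and then reads or writes IODATA to
 * access that register.  e1000e_io_get_reg_index() validates the latched
 * offset: values below 0x1FFFF map to device registers, while the higher
 * ranges (an undefined hole and the flash window) are rejected with a
 * warning trace.
 */
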
static bool
e1000e_io_get_reg_index(E1000EState *s, uint32_t *idx)
{
    if (s->ioaddr < 0x1FFFF) {
        *idx = s->ioaddr;
        return true;
    }

    if (s->ioaddr < 0x7FFFF) {
        trace_e1000e_wrn_io_addr_undefined(s->ioaddr);
        return false;
    }

    if (s->ioaddr < 0xFFFFF) {
        trace_e1000e_wrn_io_addr_flash(s->ioaddr);
        return false;
    }

    trace_e1000e_wrn_io_addr_unknown(s->ioaddr);
    return false;
}

static uint64_t
e1000e_io_read(void *opaque, hwaddr addr, unsigned size)
{
    E1000EState *s = opaque;
    uint32_t idx = 0;
    uint64_t val;

    switch (addr) {
    case 0x00: /* IOADDR */
        trace_e1000e_io_read_addr(s->ioaddr);
        return s->ioaddr;
    case 0x04: /* IODATA */
        if (e1000e_io_get_reg_index(s, &idx)) {
            val = e1000e_core_read(&s->core, idx, sizeof(val));
            trace_e1000e_io_read_data(idx, val);
            return val;
        }
        return 0;
    default:
        trace_e1000e_wrn_io_read_unknown(addr);
        return 0;
    }
}

static void
e1000e_io_write(void *opaque, hwaddr addr,
                uint64_t val, unsigned size)
{
    E1000EState *s = opaque;
    uint32_t idx = 0;

    switch (addr) {
    case 0x00: /* IOADDR */
        trace_e1000e_io_write_addr(val);
        s->ioaddr = (uint32_t) val;
        return;
    case 0x04: /* IODATA */
        if (e1000e_io_get_reg_index(s, &idx)) {
            trace_e1000e_io_write_data(idx, val);
            e1000e_core_write(&s->core, idx, val, sizeof(val));
        }
        return;
    default:
        trace_e1000e_wrn_io_write_unknown(addr);
        return;
    }
}

static const MemoryRegionOps mmio_ops = {
    .read = e1000e_mmio_read,
    .write = e1000e_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps io_ops = {
    .read = e1000e_io_read,
    .write = e1000e_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static int
e1000e_nc_can_receive(NetClientState *nc)
{
    E1000EState *s = qemu_get_nic_opaque(nc);
    return e1000e_can_receive(&s->core);
}

static ssize_t
e1000e_nc_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
    E1000EState *s = qemu_get_nic_opaque(nc);
    return e1000e_receive_iov(&s->core, iov, iovcnt);
}

static ssize_t
e1000e_nc_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    E1000EState *s = qemu_get_nic_opaque(nc);
    return e1000e_receive(&s->core, buf, size);
}

static void
e1000e_set_link_status(NetClientState *nc)
{
    E1000EState *s = qemu_get_nic_opaque(nc);
    e1000e_core_set_link_status(&s->core);
}

static NetClientInfo net_e1000e_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000e_nc_can_receive,
    .receive = e1000e_nc_receive,
    .receive_iov = e1000e_nc_receive_iov,
    .link_status_changed = e1000e_set_link_status,
};

/*
 * EEPROM (NVM) contents documented in Table 36, section 6.1
 * and generally 6.1.2 Software accessed words.
 */
static const uint16_t e1000e_eeprom_template[64] = {
  /*        Address        |    Compat.    | ImVer |   Compat.     */
    0x0000, 0x0000, 0x0000, 0x0420, 0xf746, 0x2010, 0xffff, 0xffff,
  /*      PBA      |ICtrl1 | SSID  | SVID  | DevID |-------|ICtrl2 */
    0x0000, 0x0000, 0x026b, 0x0000, 0x8086, 0x0000, 0x0000, 0x8058,
  /*    NVM words 1,2,3    |-------------------------------|PCI-EID*/
    0x0000, 0x2001, 0x7e7c, 0xffff, 0x1000, 0x00c8, 0x0000, 0x2704,
  /* PCIe Init. Conf 1,2,3 |PCICtrl|PHY|LD1|-------| RevID | LD0,2 */
    0x6cc9, 0x3150, 0x070e, 0x460b, 0x2d84, 0x0100, 0xf000, 0x0706,
  /* FLPAR |FLANADD|LAN-PWR|FlVndr |ICtrl3 |APTSMBA|APTRxEP|APTSMBC*/
    0x6000, 0x0080, 0x0f04, 0x7fff, 0x4f01, 0xc600, 0x0000, 0x20ff,
  /* APTIF | APTMC |APTuCP |LSWFWID|MSWFWID|NC-SIMC|NC-SIC | VPDP  */
    0x0028, 0x0003, 0x0000, 0x0000, 0x0000, 0x0003, 0x0000, 0xffff,
  /*                            SW Section                         */
    0x0100, 0xc000, 0x121c, 0xc007, 0xffff, 0xffff, 0xffff, 0xffff,
  /*             SW Section               |CHKSUM */
    0xffff, 0xffff, 0xffff, 0xffff, 0x0000, 0x0120, 0xffff, 0x0000,
};

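/*
 * For e1000-family NVM images the 16-bit words are expected to sum to 0xBABA
 * (see the CHKSUM column above).  The MAC address words at the start of this
 * template are left as zeros, so the checksum is presumably recomputed by the
 * device core once the real MAC address is filled in at realize time.
 */
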
static void e1000e_core_realize(E1000EState *s)
{
    s->core.owner = &s->parent_obj;
    s->core.owner_nic = s->nic;
}

static void
e1000e_unuse_msix_vectors(E1000EState *s, int num_vectors)
{
    int i;

    for (i = 0; i < num_vectors; i++) {
        msix_vector_unuse(PCI_DEVICE(s), i);
    }
}

static bool
e1000e_use_msix_vectors(E1000EState *s, int num_vectors)
{
    int i;

    for (i = 0; i < num_vectors; i++) {
        int res = msix_vector_use(PCI_DEVICE(s), i);
        if (res < 0) {
            trace_e1000e_msix_use_vector_fail(i, res);
            e1000e_unuse_msix_vectors(s, i);
            return false;
        }
    }

    return true;
}

static void
e1000e_init_msix(E1000EState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int res = msix_init(PCI_DEVICE(s), E1000E_MSIX_VEC_NUM,
                        &s->msix,
                        E1000E_MSIX_IDX, E1000E_MSIX_TABLE,
                        &s->msix,
                        E1000E_MSIX_IDX, E1000E_MSIX_PBA,
                        0xA0 /* capability offset, assumed */);

    if (res < 0) {
        trace_e1000e_msix_init_fail(res);
    } else {
        if (!e1000e_use_msix_vectors(s, E1000E_MSIX_VEC_NUM)) {
            msix_uninit(d, &s->msix, &s->msix);
        }
    }
}

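/*
 * All MSI-X vectors are marked as used up front (see above), so the core can
 * raise any of the E1000E_MSIX_VEC_NUM vectors without further bookkeeping;
 * the cleanup below undoes this and removes the capability on device
 * teardown.
 */
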
static void
e1000e_cleanup_msix(E1000EState *s)
{
    if (msix_present(PCI_DEVICE(s))) {
        e1000e_unuse_msix_vectors(s, E1000E_MSIX_VEC_NUM);
        msix_uninit(PCI_DEVICE(s), &s->msix, &s->msix);
    }
}

static void
e1000e_init_net_peer(E1000EState *s, PCIDevice *pci_dev, uint8_t *macaddr)
{
    DeviceState *dev = DEVICE(pci_dev);
    NetClientState *nc;
    int i;

    s->nic = qemu_new_nic(&net_e1000e_info, &s->conf,
                          object_get_typename(OBJECT(s)), dev->id, s);

    s->core.max_queue_num = s->conf.peers.queues - 1;

    trace_e1000e_mac_set_permanent(MAC_ARG(macaddr));
    memcpy(s->core.permanent_mac, macaddr, sizeof(s->core.permanent_mac));

    qemu_format_nic_info_str(qemu_get_queue(s->nic), macaddr);

    /* Setup virtio headers */
    if (s->disable_vnet) {
        s->core.has_vnet = false;
        trace_e1000e_cfg_support_virtio(false);
        return;
    } else {
        s->core.has_vnet = true;
    }

    for (i = 0; i < s->conf.peers.queues; i++) {
        nc = qemu_get_subqueue(s->nic, i);
        if (!nc->peer || !qemu_has_vnet_hdr(nc->peer)) {
            s->core.has_vnet = false;
            trace_e1000e_cfg_support_virtio(false);
            return;
        }
    }

    trace_e1000e_cfg_support_virtio(true);

    for (i = 0; i < s->conf.peers.queues; i++) {
        nc = qemu_get_subqueue(s->nic, i);
        qemu_set_vnet_hdr_len(nc->peer, sizeof(struct virtio_net_hdr));
        qemu_using_vnet_hdr(nc->peer, true);
    }
}

static inline uint64_t
e1000e_gen_dsn(uint8_t *mac)
{
    return (uint64_t)(mac[5])        |
           (uint64_t)(mac[4])  << 8  |
           (uint64_t)(mac[3])  << 16 |
           (uint64_t)(0x00FF)  << 24 |
           (uint64_t)(0x00FF)  << 32 |
           (uint64_t)(mac[2])  << 40 |
           (uint64_t)(mac[1])  << 48 |
           (uint64_t)(mac[0])  << 56;
}

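/*
 * The serial number above inserts the 0xFF,0xFF filler bytes between the OUI
 * and the NIC-specific half of the MAC address, as commonly done when
 * expanding a 48-bit MAC into a 64-bit identifier.  For example, a
 * (hypothetical) MAC of 52:54:00:12:34:56 yields 0x525400FFFF123456.
 */
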
static int
e1000e_add_pm_capability(PCIDevice *pdev, uint8_t offset, uint16_t pmc)
{
    int ret = pci_add_capability(pdev, PCI_CAP_ID_PM, offset, PCI_PM_SIZEOF);

    if (ret >= 0) {
        /* PM capabilities register: spec version plus caller-provided bits */
        pci_set_word(pdev->config + offset + PCI_PM_PMC,
                     0x0002 /* PM spec v1.1 */ | pmc);

        pci_set_word(pdev->wmask + offset + PCI_PM_CTRL,
                     PCI_PM_CTRL_STATE_MASK |
                     PCI_PM_CTRL_PME_ENABLE |
                     PCI_PM_CTRL_DATA_SEL_MASK);

        pci_set_word(pdev->w1cmask + offset + PCI_PM_CTRL,
                     PCI_PM_CTRL_PME_STATUS);
    }

    return ret;
}

static void e1000e_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    E1000EState *s = E1000E(pci_dev);

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        (pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        e1000e_start_recv(&s->core);
    }
}

static void e1000e_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    static const uint16_t e1000e_pmrb_offset = 0x0C8;
    static const uint16_t e1000e_pcie_offset = 0x0E0;
    static const uint16_t e1000e_aer_offset = 0x100;
    static const uint16_t e1000e_dsn_offset = 0x140;
    E1000EState *s = E1000E(pci_dev);
    uint8_t *macaddr;
    int ret;

    trace_e1000e_cb_pci_realize();

    pci_dev->config_write = e1000e_write_config;

    pci_dev->config[PCI_CACHE_LINE_SIZE] = 0x10;
    pci_dev->config[PCI_INTERRUPT_PIN] = 1;

    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, s->subsys_ven);
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, s->subsys);

    s->subsys_ven_used = s->subsys_ven;
    s->subsys_used = s->subsys;

    /* Define IO/MMIO regions */
    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_ops, s,
                          "e1000e-mmio", E1000E_MMIO_SIZE);
    pci_register_bar(pci_dev, E1000E_MMIO_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio);

    /*
     * We provide a dummy implementation for the flash BAR
     * for drivers that may theoretically probe for its presence.
     */
    memory_region_init(&s->flash, OBJECT(s),
                       "e1000e-flash", E1000E_FLASH_SIZE);
    pci_register_bar(pci_dev, E1000E_FLASH_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->flash);

    memory_region_init_io(&s->io, OBJECT(s), &io_ops, s,
                          "e1000e-io", E1000E_IO_SIZE);
    pci_register_bar(pci_dev, E1000E_IO_IDX,
                     PCI_BASE_ADDRESS_SPACE_IO, &s->io);

    memory_region_init(&s->msix, OBJECT(s), "e1000e-msix",
                       E1000E_MSIX_SIZE);
    pci_register_bar(pci_dev, E1000E_MSIX_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->msix);

    /* Create networking backend */
    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    macaddr = s->conf.macaddr.a;

    e1000e_init_msix(s);

    if (pcie_endpoint_cap_v1_init(pci_dev, e1000e_pcie_offset) < 0) {
        hw_error("Failed to initialize PCIe capability");
    }

    ret = msi_init(PCI_DEVICE(s), 0xD0, 1, true, false, NULL);
    if (ret) {
        trace_e1000e_msi_init_fail(ret);
    }

    if (e1000e_add_pm_capability(pci_dev, e1000e_pmrb_offset,
                                 PCI_PM_CAP_DSI) < 0) {
        hw_error("Failed to initialize PM capability");
    }

    if (pcie_aer_init(pci_dev, PCI_ERR_VER, e1000e_aer_offset,
                      PCI_ERR_SIZEOF, NULL) < 0) {
        hw_error("Failed to initialize AER capability");
    }

    pcie_dev_ser_num_init(pci_dev, e1000e_dsn_offset,
                          e1000e_gen_dsn(macaddr));

    e1000e_init_net_peer(s, pci_dev, macaddr);

    /* Initialize core */
    e1000e_core_realize(s);

    e1000e_core_pci_realize(&s->core,
                            e1000e_eeprom_template,
                            sizeof(e1000e_eeprom_template),
                            macaddr);
}

static void e1000e_pci_uninit(PCIDevice *pci_dev)
{
    E1000EState *s = E1000E(pci_dev);

    trace_e1000e_cb_pci_uninit();

    e1000e_core_pci_uninit(&s->core);

    pcie_aer_exit(pci_dev);
    pcie_cap_exit(pci_dev);

    qemu_del_nic(s->nic);

    e1000e_cleanup_msix(s);
    msi_uninit(pci_dev);
}

static void e1000e_qdev_reset(DeviceState *dev)
{
    E1000EState *s = E1000E(dev);

    trace_e1000e_cb_qdev_reset();

    e1000e_core_reset(&s->core);
}

static void e1000e_pre_save(void *opaque)
{
    E1000EState *s = opaque;

    trace_e1000e_cb_pre_save();

    e1000e_core_pre_save(&s->core);
}

static int e1000e_post_load(void *opaque, int version_id)
{
    E1000EState *s = opaque;

    trace_e1000e_cb_post_load();

    if ((s->subsys != s->subsys_used) ||
        (s->subsys_ven != s->subsys_ven_used)) {
        fprintf(stderr,
                "ERROR: Cannot migrate while device properties "
                "(subsys/subsys_ven) differ\n");
        return -1;
    }

    return e1000e_core_post_load(&s->core);
}

static const VMStateDescription e1000e_vmstate_tx = {
    .name = "e1000e-tx",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(props.sum_needed, struct e1000e_tx),
        VMSTATE_UINT8(props.ipcss, struct e1000e_tx),
        VMSTATE_UINT8(props.ipcso, struct e1000e_tx),
        VMSTATE_UINT16(props.ipcse, struct e1000e_tx),
        VMSTATE_UINT8(props.tucss, struct e1000e_tx),
        VMSTATE_UINT8(props.tucso, struct e1000e_tx),
        VMSTATE_UINT16(props.tucse, struct e1000e_tx),
        VMSTATE_UINT8(props.hdr_len, struct e1000e_tx),
        VMSTATE_UINT16(props.mss, struct e1000e_tx),
        VMSTATE_UINT32(props.paylen, struct e1000e_tx),
        VMSTATE_INT8(props.ip, struct e1000e_tx),
        VMSTATE_INT8(props.tcp, struct e1000e_tx),
        VMSTATE_BOOL(props.tse, struct e1000e_tx),
        VMSTATE_BOOL(props.cptse, struct e1000e_tx),
        VMSTATE_BOOL(skip_cp, struct e1000e_tx),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription e1000e_vmstate_intr_timer = {
    .name = "e1000e-intr-timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_TIMER_PTR(timer, E1000IntrDelayTimer),
        VMSTATE_BOOL(running, E1000IntrDelayTimer),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_E1000E_INTR_DELAY_TIMER(_f, _s)                     \
    VMSTATE_STRUCT(_f, _s, 0,                                       \
                   e1000e_vmstate_intr_timer, E1000IntrDelayTimer)

#define VMSTATE_E1000E_INTR_DELAY_TIMER_ARRAY(_f, _s, _num)         \
    VMSTATE_STRUCT_ARRAY(_f, _s, _num, 0,                           \
                         e1000e_vmstate_intr_timer, E1000IntrDelayTimer)

static const VMStateDescription e1000e_vmstate = {
    .name = "e1000e",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = e1000e_pre_save,
    .post_load = e1000e_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, E1000EState),
        VMSTATE_MSIX(parent_obj, E1000EState),

        VMSTATE_UINT32(ioaddr, E1000EState),
        VMSTATE_UINT32(core.rxbuf_min_shift, E1000EState),
        VMSTATE_UINT8(core.rx_desc_len, E1000EState),
        VMSTATE_UINT32_ARRAY(core.rxbuf_sizes, E1000EState,
                             E1000_PSRCTL_BUFFS_PER_DESC),
        VMSTATE_UINT32(core.rx_desc_buf_size, E1000EState),
        VMSTATE_UINT16_ARRAY(core.eeprom, E1000EState, E1000E_EEPROM_SIZE),
        VMSTATE_UINT16_2DARRAY(core.phy, E1000EState,
                               E1000E_PHY_PAGES, E1000E_PHY_PAGE_SIZE),
        VMSTATE_UINT32_ARRAY(core.mac, E1000EState, E1000E_MAC_SIZE),
        VMSTATE_UINT8_ARRAY(core.permanent_mac, E1000EState, ETH_ALEN),

        VMSTATE_UINT32(core.delayed_causes, E1000EState),

        VMSTATE_UINT16(subsys, E1000EState),
        VMSTATE_UINT16(subsys_ven, E1000EState),

        VMSTATE_E1000E_INTR_DELAY_TIMER(core.rdtr, E1000EState),
        VMSTATE_E1000E_INTR_DELAY_TIMER(core.radv, E1000EState),
        VMSTATE_E1000E_INTR_DELAY_TIMER(core.raid, E1000EState),
        VMSTATE_E1000E_INTR_DELAY_TIMER(core.tadv, E1000EState),
        VMSTATE_E1000E_INTR_DELAY_TIMER(core.tidv, E1000EState),

        VMSTATE_E1000E_INTR_DELAY_TIMER(core.itr, E1000EState),
        VMSTATE_BOOL(core.itr_intr_pending, E1000EState),

        VMSTATE_E1000E_INTR_DELAY_TIMER_ARRAY(core.eitr, E1000EState,
                                              E1000E_MSIX_VEC_NUM),
        VMSTATE_BOOL_ARRAY(core.eitr_intr_pending, E1000EState,
                           E1000E_MSIX_VEC_NUM),

        VMSTATE_UINT32(core.itr_guest_value, E1000EState),
        VMSTATE_UINT32_ARRAY(core.eitr_guest_value, E1000EState,
                             E1000E_MSIX_VEC_NUM),

        VMSTATE_UINT16(core.vet, E1000EState),

        VMSTATE_STRUCT_ARRAY(core.tx, E1000EState, E1000E_NUM_QUEUES, 0,
                             e1000e_vmstate_tx, struct e1000e_tx),
        VMSTATE_END_OF_LIST()
    }
};

static PropertyInfo e1000e_prop_disable_vnet,
                    e1000e_prop_subsys_ven,
                    e1000e_prop_subsys;

static Property e1000e_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000EState, conf),
    DEFINE_PROP_SIGNED("disable_vnet_hdr", E1000EState, disable_vnet, false,
                        e1000e_prop_disable_vnet, bool),
    DEFINE_PROP_SIGNED("subsys_ven", E1000EState, subsys_ven,
                        PCI_VENDOR_ID_INTEL,
                        e1000e_prop_subsys_ven, uint16_t),
    DEFINE_PROP_SIGNED("subsys", E1000EState, subsys, 0,
                        e1000e_prop_subsys, uint16_t),
    DEFINE_PROP_END_OF_LIST(),
};

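/*
 * Example invocation (a sketch; the backend id and MAC value are arbitrary):
 *
 *   qemu-system-x86_64 ... \
 *       -netdev tap,id=net0 \
 *       -device e1000e,netdev=net0,mac=52:54:00:12:34:56
 *
 * "disable_vnet_hdr" turns off the use of virtio-net headers (offloads are
 * then emulated in software), while "subsys_ven"/"subsys" override the PCI
 * subsystem IDs; migration is refused if the latter differ between source
 * and destination (see e1000e_post_load() above).
 */
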
static void e1000e_class_init(ObjectClass *class, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(class);
    PCIDeviceClass *c = PCI_DEVICE_CLASS(class);

    c->realize = e1000e_pci_realize;
    c->exit = e1000e_pci_uninit;
    c->vendor_id = PCI_VENDOR_ID_INTEL;
    c->device_id = E1000_DEV_ID_82574L;
    c->romfile = "efi-e1000e.rom";
    c->class_id = PCI_CLASS_NETWORK_ETHERNET;

    dc->desc = "Intel 82574L GbE Controller";
    dc->reset = e1000e_qdev_reset;
    dc->vmsd = &e1000e_vmstate;
    dc->props = e1000e_properties;

    e1000e_prop_disable_vnet = qdev_prop_uint8;
    e1000e_prop_disable_vnet.description = "Do not use virtio headers, "
                                           "perform SW offloads emulation "
                                           "instead";

    e1000e_prop_subsys_ven = qdev_prop_uint16;
    e1000e_prop_subsys_ven.description = "PCI device Subsystem Vendor ID";

    e1000e_prop_subsys = qdev_prop_uint16;
    e1000e_prop_subsys.description = "PCI device Subsystem ID";

    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}

static void e1000e_instance_init(Object *obj)
{
    E1000EState *s = E1000E(obj);
    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj), NULL);
}

static const TypeInfo e1000e_info = {
    .name = TYPE_E1000E,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000EState),
    .class_init = e1000e_class_init,
    .instance_init = e1000e_instance_init,
};

static void e1000e_register_types(void)
{
    type_register_static(&e1000e_info);
}

type_init(e1000e_register_types)