/*
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
35 DEBUG_GENERAL
, DEBUG_IO
, DEBUG_MMIO
, DEBUG_INTERRUPT
,
36 DEBUG_RX
, DEBUG_TX
, DEBUG_MDIC
, DEBUG_EEPROM
,
37 DEBUG_UNKNOWN
, DEBUG_TXSUM
, DEBUG_TXERR
, DEBUG_RXERR
,
38 DEBUG_RXFILTER
, DEBUG_NOTYET
,
40 #define DBGBIT(x) (1<<DEBUG_##x)
41 static int debugflags
= DBGBIT(TXERR
) | DBGBIT(GENERAL
);
43 #define DBGOUT(what, fmt, ...) do { \
44 if (debugflags & DBGBIT(what)) \
45 fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
48 #define DBGOUT(what, fmt, ...) do {} while (0)
51 #define IOPORT_SIZE 0x40
52 #define PNPMMIO_SIZE 0x20000
56 * E1000_DEV_ID_82540EM works with Windows and Linux
57 * E1000_DEV_ID_82573L OK with windoze and Linux 2.6.22,
58 * appears to perform better than 82540EM, but breaks with Linux 2.6.18
59 * E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
62 enum { E1000_DEVID
= E1000_DEV_ID_82540EM
};
65 * May need to specify additional MAC-to-PHY entries --
66 * Intel's Windows driver refuses to initialize unless they match
69 PHY_ID2_INIT
= E1000_DEVID
== E1000_DEV_ID_82573L
? 0xcc2 :
70 E1000_DEVID
== E1000_DEV_ID_82544GC_COPPER
? 0xc30 :
71 /* default to E1000_DEV_ID_82540EM */ 0xc20
74 typedef struct E1000State_st
{
79 uint32_t mac_reg
[0x8000];
80 uint16_t phy_reg
[0x20];
81 uint16_t eeprom_data
[64];
84 uint32_t rxbuf_min_shift
;
87 unsigned char header
[256];
88 unsigned char vlan_header
[4];
89 unsigned char vlan
[4];
90 unsigned char data
[0x10000];
92 unsigned char sum_needed
;
93 unsigned char vlan_needed
;
107 char cptse
; // current packet tse bit
111 uint32_t val_in
; // shifted in from guest driver
119 #define defreg(x) x = (E1000_##x>>2)
121 defreg(CTRL
), defreg(EECD
), defreg(EERD
), defreg(GPRC
),
122 defreg(GPTC
), defreg(ICR
), defreg(ICS
), defreg(IMC
),
123 defreg(IMS
), defreg(LEDCTL
), defreg(MANC
), defreg(MDIC
),
124 defreg(MPC
), defreg(PBA
), defreg(RCTL
), defreg(RDBAH
),
125 defreg(RDBAL
), defreg(RDH
), defreg(RDLEN
), defreg(RDT
),
126 defreg(STATUS
), defreg(SWSM
), defreg(TCTL
), defreg(TDBAH
),
127 defreg(TDBAL
), defreg(TDH
), defreg(TDLEN
), defreg(TDT
),
128 defreg(TORH
), defreg(TORL
), defreg(TOTH
), defreg(TOTL
),
129 defreg(TPR
), defreg(TPT
), defreg(TXDCTL
), defreg(WUFC
),
130 defreg(RA
), defreg(MTA
), defreg(CRCERRS
),defreg(VFTA
),
134 enum { PHY_R
= 1, PHY_W
= 2, PHY_RW
= PHY_R
| PHY_W
};
135 static const char phy_regcap
[0x20] = {
136 [PHY_STATUS
] = PHY_R
, [M88E1000_EXT_PHY_SPEC_CTRL
] = PHY_RW
,
137 [PHY_ID1
] = PHY_R
, [M88E1000_PHY_SPEC_CTRL
] = PHY_RW
,
138 [PHY_CTRL
] = PHY_RW
, [PHY_1000T_CTRL
] = PHY_RW
,
139 [PHY_LP_ABILITY
] = PHY_R
, [PHY_1000T_STATUS
] = PHY_R
,
140 [PHY_AUTONEG_ADV
] = PHY_RW
, [M88E1000_RX_ERR_CNTR
] = PHY_R
,
141 [PHY_ID2
] = PHY_R
, [M88E1000_PHY_SPEC_STATUS
] = PHY_R
145 ioport_map(PCIDevice
*pci_dev
, int region_num
, uint32_t addr
,
146 uint32_t size
, int type
)
148 DBGOUT(IO
, "e1000_ioport_map addr=0x%04x size=0x%08x\n", addr
, size
);
152 set_interrupt_cause(E1000State
*s
, int index
, uint32_t val
)
155 val
|= E1000_ICR_INT_ASSERTED
;
156 s
->mac_reg
[ICR
] = val
;
157 qemu_set_irq(s
->dev
.irq
[0], (s
->mac_reg
[IMS
] & s
->mac_reg
[ICR
]) != 0);
161 set_ics(E1000State
*s
, int index
, uint32_t val
)
163 DBGOUT(INTERRUPT
, "set_ics %x, ICR %x, IMR %x\n", val
, s
->mac_reg
[ICR
],
165 set_interrupt_cause(s
, 0, val
| s
->mac_reg
[ICR
]);
169 rxbufsize(uint32_t v
)
171 v
&= E1000_RCTL_BSEX
| E1000_RCTL_SZ_16384
| E1000_RCTL_SZ_8192
|
172 E1000_RCTL_SZ_4096
| E1000_RCTL_SZ_2048
| E1000_RCTL_SZ_1024
|
173 E1000_RCTL_SZ_512
| E1000_RCTL_SZ_256
;
175 case E1000_RCTL_BSEX
| E1000_RCTL_SZ_16384
:
177 case E1000_RCTL_BSEX
| E1000_RCTL_SZ_8192
:
179 case E1000_RCTL_BSEX
| E1000_RCTL_SZ_4096
:
181 case E1000_RCTL_SZ_1024
:
183 case E1000_RCTL_SZ_512
:
185 case E1000_RCTL_SZ_256
:
192 set_ctrl(E1000State
*s
, int index
, uint32_t val
)
194 /* RST is self clearing */
195 s
->mac_reg
[CTRL
] = val
& ~E1000_CTRL_RST
;
199 set_rx_control(E1000State
*s
, int index
, uint32_t val
)
201 s
->mac_reg
[RCTL
] = val
;
202 s
->rxbuf_size
= rxbufsize(val
);
203 s
->rxbuf_min_shift
= ((val
/ E1000_RCTL_RDMTS_QUAT
) & 3) + 1;
204 DBGOUT(RX
, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s
->mac_reg
[RDT
],
209 set_mdic(E1000State
*s
, int index
, uint32_t val
)
211 uint32_t data
= val
& E1000_MDIC_DATA_MASK
;
212 uint32_t addr
= ((val
& E1000_MDIC_REG_MASK
) >> E1000_MDIC_REG_SHIFT
);
214 if ((val
& E1000_MDIC_PHY_MASK
) >> E1000_MDIC_PHY_SHIFT
!= 1) // phy #
215 val
= s
->mac_reg
[MDIC
] | E1000_MDIC_ERROR
;
216 else if (val
& E1000_MDIC_OP_READ
) {
217 DBGOUT(MDIC
, "MDIC read reg 0x%x\n", addr
);
218 if (!(phy_regcap
[addr
] & PHY_R
)) {
219 DBGOUT(MDIC
, "MDIC read reg %x unhandled\n", addr
);
220 val
|= E1000_MDIC_ERROR
;
222 val
= (val
^ data
) | s
->phy_reg
[addr
];
223 } else if (val
& E1000_MDIC_OP_WRITE
) {
224 DBGOUT(MDIC
, "MDIC write reg 0x%x, value 0x%x\n", addr
, data
);
225 if (!(phy_regcap
[addr
] & PHY_W
)) {
226 DBGOUT(MDIC
, "MDIC write reg %x unhandled\n", addr
);
227 val
|= E1000_MDIC_ERROR
;
229 s
->phy_reg
[addr
] = data
;
231 s
->mac_reg
[MDIC
] = val
| E1000_MDIC_READY
;
232 set_ics(s
, 0, E1000_ICR_MDAC
);
236 get_eecd(E1000State
*s
, int index
)
238 uint32_t ret
= E1000_EECD_PRES
|E1000_EECD_GNT
| s
->eecd_state
.old_eecd
;
240 DBGOUT(EEPROM
, "reading eeprom bit %d (reading %d)\n",
241 s
->eecd_state
.bitnum_out
, s
->eecd_state
.reading
);
242 if (!s
->eecd_state
.reading
||
243 ((s
->eeprom_data
[(s
->eecd_state
.bitnum_out
>> 4) & 0x3f] >>
244 ((s
->eecd_state
.bitnum_out
& 0xf) ^ 0xf))) & 1)
245 ret
|= E1000_EECD_DO
;
250 set_eecd(E1000State
*s
, int index
, uint32_t val
)
252 uint32_t oldval
= s
->eecd_state
.old_eecd
;
254 s
->eecd_state
.old_eecd
= val
& (E1000_EECD_SK
| E1000_EECD_CS
|
255 E1000_EECD_DI
|E1000_EECD_FWE_MASK
|E1000_EECD_REQ
);
256 if (!(E1000_EECD_SK
& (val
^ oldval
))) // no clock edge
258 if (!(E1000_EECD_SK
& val
)) { // falling edge
259 s
->eecd_state
.bitnum_out
++;
262 if (!(val
& E1000_EECD_CS
)) { // rising, no CS (EEPROM reset)
263 memset(&s
->eecd_state
, 0, sizeof s
->eecd_state
);
265 * restore old_eecd's E1000_EECD_SK (known to be on)
266 * to avoid false detection of a clock edge
268 s
->eecd_state
.old_eecd
= E1000_EECD_SK
;
271 s
->eecd_state
.val_in
<<= 1;
272 if (val
& E1000_EECD_DI
)
273 s
->eecd_state
.val_in
|= 1;
274 if (++s
->eecd_state
.bitnum_in
== 9 && !s
->eecd_state
.reading
) {
275 s
->eecd_state
.bitnum_out
= ((s
->eecd_state
.val_in
& 0x3f)<<4)-1;
276 s
->eecd_state
.reading
= (((s
->eecd_state
.val_in
>> 6) & 7) ==
277 EEPROM_READ_OPCODE_MICROWIRE
);
279 DBGOUT(EEPROM
, "eeprom bitnum in %d out %d, reading %d\n",
280 s
->eecd_state
.bitnum_in
, s
->eecd_state
.bitnum_out
,
281 s
->eecd_state
.reading
);
285 flash_eerd_read(E1000State
*s
, int x
)
287 unsigned int index
, r
= s
->mac_reg
[EERD
] & ~E1000_EEPROM_RW_REG_START
;
289 if ((index
= r
>> E1000_EEPROM_RW_ADDR_SHIFT
) > EEPROM_CHECKSUM_REG
)
291 return (s
->eeprom_data
[index
] << E1000_EEPROM_RW_REG_DATA
) |
292 E1000_EEPROM_RW_REG_DONE
| r
;
296 putsum(uint8_t *data
, uint32_t n
, uint32_t sloc
, uint32_t css
, uint32_t cse
)
303 sum
= net_checksum_add(n
-css
, data
+css
);
304 cpu_to_be16wu((uint16_t *)(data
+ sloc
),
305 net_checksum_finish(sum
));
310 vlan_enabled(E1000State
*s
)
312 return ((s
->mac_reg
[CTRL
] & E1000_CTRL_VME
) != 0);
316 vlan_rx_filter_enabled(E1000State
*s
)
318 return ((s
->mac_reg
[RCTL
] & E1000_RCTL_VFE
) != 0);
322 is_vlan_packet(E1000State
*s
, const uint8_t *buf
)
324 return (be16_to_cpup((uint16_t *)(buf
+ 12)) ==
325 le16_to_cpup((uint16_t *)(s
->mac_reg
+ VET
)));
329 is_vlan_txd(uint32_t txd_lower
)
331 return ((txd_lower
& E1000_TXD_CMD_VLE
) != 0);
335 xmit_seg(E1000State
*s
)
338 unsigned int frames
= s
->tx
.tso_frames
, css
, sofar
, n
;
339 struct e1000_tx
*tp
= &s
->tx
;
341 if (tp
->tse
&& tp
->cptse
) {
343 DBGOUT(TXSUM
, "frames %d size %d ipcss %d\n",
344 frames
, tp
->size
, css
);
345 if (tp
->ip
) { // IPv4
346 cpu_to_be16wu((uint16_t *)(tp
->data
+css
+2),
348 cpu_to_be16wu((uint16_t *)(tp
->data
+css
+4),
349 be16_to_cpup((uint16_t *)(tp
->data
+css
+4))+frames
);
351 cpu_to_be16wu((uint16_t *)(tp
->data
+css
+4),
354 len
= tp
->size
- css
;
355 DBGOUT(TXSUM
, "tcp %d tucss %d len %d\n", tp
->tcp
, css
, len
);
357 sofar
= frames
* tp
->mss
;
358 cpu_to_be32wu((uint32_t *)(tp
->data
+css
+4), // seq
359 be32_to_cpupu((uint32_t *)(tp
->data
+css
+4))+sofar
);
360 if (tp
->paylen
- sofar
> tp
->mss
)
361 tp
->data
[css
+ 13] &= ~9; // PSH, FIN
363 cpu_to_be16wu((uint16_t *)(tp
->data
+css
+4), len
);
364 if (tp
->sum_needed
& E1000_TXD_POPTS_TXSM
) {
365 // add pseudo-header length before checksum calculation
366 sp
= (uint16_t *)(tp
->data
+ tp
->tucso
);
367 cpu_to_be16wu(sp
, be16_to_cpup(sp
) + len
);
372 if (tp
->sum_needed
& E1000_TXD_POPTS_TXSM
)
373 putsum(tp
->data
, tp
->size
, tp
->tucso
, tp
->tucss
, tp
->tucse
);
374 if (tp
->sum_needed
& E1000_TXD_POPTS_IXSM
)
375 putsum(tp
->data
, tp
->size
, tp
->ipcso
, tp
->ipcss
, tp
->ipcse
);
376 if (tp
->vlan_needed
) {
377 memmove(tp
->vlan
, tp
->data
, 12);
378 memcpy(tp
->data
+ 8, tp
->vlan_header
, 4);
379 qemu_send_packet(s
->vc
, tp
->vlan
, tp
->size
+ 4);
381 qemu_send_packet(s
->vc
, tp
->data
, tp
->size
);
384 n
= s
->mac_reg
[TOTL
];
385 if ((s
->mac_reg
[TOTL
] += s
->tx
.size
) < n
)
390 process_tx_desc(E1000State
*s
, struct e1000_tx_desc
*dp
)
392 uint32_t txd_lower
= le32_to_cpu(dp
->lower
.data
);
393 uint32_t dtype
= txd_lower
& (E1000_TXD_CMD_DEXT
| E1000_TXD_DTYP_D
);
394 unsigned int split_size
= txd_lower
& 0xffff, bytes
, sz
, op
;
395 unsigned int msh
= 0xfffff, hdr
= 0;
397 struct e1000_context_desc
*xp
= (struct e1000_context_desc
*)dp
;
398 struct e1000_tx
*tp
= &s
->tx
;
400 if (dtype
== E1000_TXD_CMD_DEXT
) { // context descriptor
401 op
= le32_to_cpu(xp
->cmd_and_length
);
402 tp
->ipcss
= xp
->lower_setup
.ip_fields
.ipcss
;
403 tp
->ipcso
= xp
->lower_setup
.ip_fields
.ipcso
;
404 tp
->ipcse
= le16_to_cpu(xp
->lower_setup
.ip_fields
.ipcse
);
405 tp
->tucss
= xp
->upper_setup
.tcp_fields
.tucss
;
406 tp
->tucso
= xp
->upper_setup
.tcp_fields
.tucso
;
407 tp
->tucse
= le16_to_cpu(xp
->upper_setup
.tcp_fields
.tucse
);
408 tp
->paylen
= op
& 0xfffff;
409 tp
->hdr_len
= xp
->tcp_seg_setup
.fields
.hdr_len
;
410 tp
->mss
= le16_to_cpu(xp
->tcp_seg_setup
.fields
.mss
);
411 tp
->ip
= (op
& E1000_TXD_CMD_IP
) ? 1 : 0;
412 tp
->tcp
= (op
& E1000_TXD_CMD_TCP
) ? 1 : 0;
413 tp
->tse
= (op
& E1000_TXD_CMD_TSE
) ? 1 : 0;
415 if (tp
->tucso
== 0) { // this is probably wrong
416 DBGOUT(TXSUM
, "TCP/UDP: cso 0!\n");
417 tp
->tucso
= tp
->tucss
+ (tp
->tcp
? 16 : 6);
420 } else if (dtype
== (E1000_TXD_CMD_DEXT
| E1000_TXD_DTYP_D
)) {
422 tp
->sum_needed
= le32_to_cpu(dp
->upper
.data
) >> 8;
423 tp
->cptse
= ( txd_lower
& E1000_TXD_CMD_TSE
) ? 1 : 0;
428 if (vlan_enabled(s
) && is_vlan_txd(txd_lower
) &&
429 (tp
->cptse
|| txd_lower
& E1000_TXD_CMD_EOP
)) {
431 cpu_to_be16wu((uint16_t *)(tp
->vlan_header
),
432 le16_to_cpup((uint16_t *)(s
->mac_reg
+ VET
)));
433 cpu_to_be16wu((uint16_t *)(tp
->vlan_header
+ 2),
434 le16_to_cpu(dp
->upper
.fields
.special
));
437 addr
= le64_to_cpu(dp
->buffer_addr
);
438 if (tp
->tse
&& tp
->cptse
) {
443 if (tp
->size
+ bytes
> msh
)
444 bytes
= msh
- tp
->size
;
445 cpu_physical_memory_read(addr
, tp
->data
+ tp
->size
, bytes
);
446 if ((sz
= tp
->size
+ bytes
) >= hdr
&& tp
->size
< hdr
)
447 memmove(tp
->header
, tp
->data
, hdr
);
452 memmove(tp
->data
, tp
->header
, hdr
);
455 } while (split_size
-= bytes
);
456 } else if (!tp
->tse
&& tp
->cptse
) {
457 // context descriptor TSE is not set, while data descriptor TSE is set
458 DBGOUT(TXERR
, "TCP segmentaion Error\n");
460 cpu_physical_memory_read(addr
, tp
->data
+ tp
->size
, split_size
);
461 tp
->size
+= split_size
;
464 if (!(txd_lower
& E1000_TXD_CMD_EOP
))
466 if (!(tp
->tse
&& tp
->cptse
&& tp
->size
< hdr
))
476 txdesc_writeback(target_phys_addr_t base
, struct e1000_tx_desc
*dp
)
478 uint32_t txd_upper
, txd_lower
= le32_to_cpu(dp
->lower
.data
);
480 if (!(txd_lower
& (E1000_TXD_CMD_RS
|E1000_TXD_CMD_RPS
)))
482 txd_upper
= (le32_to_cpu(dp
->upper
.data
) | E1000_TXD_STAT_DD
) &
483 ~(E1000_TXD_STAT_EC
| E1000_TXD_STAT_LC
| E1000_TXD_STAT_TU
);
484 dp
->upper
.data
= cpu_to_le32(txd_upper
);
485 cpu_physical_memory_write(base
+ ((char *)&dp
->upper
- (char *)dp
),
486 (void *)&dp
->upper
, sizeof(dp
->upper
));
487 return E1000_ICR_TXDW
;
491 start_xmit(E1000State
*s
)
493 target_phys_addr_t base
;
494 struct e1000_tx_desc desc
;
495 uint32_t tdh_start
= s
->mac_reg
[TDH
], cause
= E1000_ICS_TXQE
;
497 if (!(s
->mac_reg
[TCTL
] & E1000_TCTL_EN
)) {
498 DBGOUT(TX
, "tx disabled\n");
502 while (s
->mac_reg
[TDH
] != s
->mac_reg
[TDT
]) {
503 base
= ((uint64_t)s
->mac_reg
[TDBAH
] << 32) + s
->mac_reg
[TDBAL
] +
504 sizeof(struct e1000_tx_desc
) * s
->mac_reg
[TDH
];
505 cpu_physical_memory_read(base
, (void *)&desc
, sizeof(desc
));
507 DBGOUT(TX
, "index %d: %p : %x %x\n", s
->mac_reg
[TDH
],
508 (void *)(intptr_t)desc
.buffer_addr
, desc
.lower
.data
,
511 process_tx_desc(s
, &desc
);
512 cause
|= txdesc_writeback(base
, &desc
);
514 if (++s
->mac_reg
[TDH
] * sizeof(desc
) >= s
->mac_reg
[TDLEN
])
517 * the following could happen only if guest sw assigns
518 * bogus values to TDT/TDLEN.
519 * there's nothing too intelligent we could do about this.
521 if (s
->mac_reg
[TDH
] == tdh_start
) {
522 DBGOUT(TXERR
, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
523 tdh_start
, s
->mac_reg
[TDT
], s
->mac_reg
[TDLEN
]);
527 set_ics(s
, 0, cause
);
531 receive_filter(E1000State
*s
, const uint8_t *buf
, int size
)
533 static uint8_t bcast
[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
534 static int mta_shift
[] = {4, 3, 2, 0};
535 uint32_t f
, rctl
= s
->mac_reg
[RCTL
], ra
[2], *rp
;
537 if (is_vlan_packet(s
, buf
) && vlan_rx_filter_enabled(s
)) {
538 uint16_t vid
= be16_to_cpup((uint16_t *)(buf
+ 14));
539 uint32_t vfta
= le32_to_cpup((uint32_t *)(s
->mac_reg
+ VFTA
) +
540 ((vid
>> 5) & 0x7f));
541 if ((vfta
& (1 << (vid
& 0x1f))) == 0)
545 if (rctl
& E1000_RCTL_UPE
) // promiscuous
548 if ((buf
[0] & 1) && (rctl
& E1000_RCTL_MPE
)) // promiscuous mcast
551 if ((rctl
& E1000_RCTL_BAM
) && !memcmp(buf
, bcast
, sizeof bcast
))
554 for (rp
= s
->mac_reg
+ RA
; rp
< s
->mac_reg
+ RA
+ 32; rp
+= 2) {
555 if (!(rp
[1] & E1000_RAH_AV
))
557 ra
[0] = cpu_to_le32(rp
[0]);
558 ra
[1] = cpu_to_le32(rp
[1]);
559 if (!memcmp(buf
, (uint8_t *)ra
, 6)) {
561 "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
562 (int)(rp
- s
->mac_reg
- RA
)/2,
563 buf
[0], buf
[1], buf
[2], buf
[3], buf
[4], buf
[5]);
567 DBGOUT(RXFILTER
, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
568 buf
[0], buf
[1], buf
[2], buf
[3], buf
[4], buf
[5]);
570 f
= mta_shift
[(rctl
>> E1000_RCTL_MO_SHIFT
) & 3];
571 f
= (((buf
[5] << 8) | buf
[4]) >> f
) & 0xfff;
572 if (s
->mac_reg
[MTA
+ (f
>> 5)] & (1 << (f
& 0x1f)))
575 "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
576 buf
[0], buf
[1], buf
[2], buf
[3], buf
[4], buf
[5],
577 (rctl
>> E1000_RCTL_MO_SHIFT
) & 3, f
>> 5,
578 s
->mac_reg
[MTA
+ (f
>> 5)]);
584 e1000_set_link_status(VLANClientState
*vc
)
586 E1000State
*s
= vc
->opaque
;
587 uint32_t old_status
= s
->mac_reg
[STATUS
];
590 s
->mac_reg
[STATUS
] &= ~E1000_STATUS_LU
;
592 s
->mac_reg
[STATUS
] |= E1000_STATUS_LU
;
594 if (s
->mac_reg
[STATUS
] != old_status
)
595 set_ics(s
, 0, E1000_ICR_LSC
);
599 e1000_can_receive(VLANClientState
*vc
)
601 E1000State
*s
= vc
->opaque
;
603 return (s
->mac_reg
[RCTL
] & E1000_RCTL_EN
);
607 e1000_receive(VLANClientState
*vc
, const uint8_t *buf
, size_t size
)
609 E1000State
*s
= vc
->opaque
;
610 struct e1000_rx_desc desc
;
611 target_phys_addr_t base
;
614 uint16_t vlan_special
= 0;
615 uint8_t vlan_status
= 0, vlan_offset
= 0;
617 if (!(s
->mac_reg
[RCTL
] & E1000_RCTL_EN
))
620 if (size
> s
->rxbuf_size
) {
621 DBGOUT(RX
, "packet too large for buffers (%lu > %d)\n",
622 (unsigned long)size
, s
->rxbuf_size
);
626 if (!receive_filter(s
, buf
, size
))
629 if (vlan_enabled(s
) && is_vlan_packet(s
, buf
)) {
630 vlan_special
= cpu_to_le16(be16_to_cpup((uint16_t *)(buf
+ 14)));
631 memmove((void *)(buf
+ 4), buf
, 12);
632 vlan_status
= E1000_RXD_STAT_VP
;
637 rdh_start
= s
->mac_reg
[RDH
];
638 size
+= 4; // for the header
640 if (s
->mac_reg
[RDH
] == s
->mac_reg
[RDT
] && s
->check_rxov
) {
641 set_ics(s
, 0, E1000_ICS_RXO
);
644 base
= ((uint64_t)s
->mac_reg
[RDBAH
] << 32) + s
->mac_reg
[RDBAL
] +
645 sizeof(desc
) * s
->mac_reg
[RDH
];
646 cpu_physical_memory_read(base
, (void *)&desc
, sizeof(desc
));
647 desc
.special
= vlan_special
;
648 desc
.status
|= (vlan_status
| E1000_RXD_STAT_DD
);
649 if (desc
.buffer_addr
) {
650 cpu_physical_memory_write(le64_to_cpu(desc
.buffer_addr
),
651 (void *)(buf
+ vlan_offset
), size
);
652 desc
.length
= cpu_to_le16(size
);
653 desc
.status
|= E1000_RXD_STAT_EOP
|E1000_RXD_STAT_IXSM
;
654 } else // as per intel docs; skip descriptors with null buf addr
655 DBGOUT(RX
, "Null RX descriptor!!\n");
656 cpu_physical_memory_write(base
, (void *)&desc
, sizeof(desc
));
658 if (++s
->mac_reg
[RDH
] * sizeof(desc
) >= s
->mac_reg
[RDLEN
])
661 /* see comment in start_xmit; same here */
662 if (s
->mac_reg
[RDH
] == rdh_start
) {
663 DBGOUT(RXERR
, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
664 rdh_start
, s
->mac_reg
[RDT
], s
->mac_reg
[RDLEN
]);
665 set_ics(s
, 0, E1000_ICS_RXO
);
668 } while (desc
.buffer_addr
== 0);
672 n
= s
->mac_reg
[TORL
];
673 if ((s
->mac_reg
[TORL
] += size
) < n
)
677 if ((rdt
= s
->mac_reg
[RDT
]) < s
->mac_reg
[RDH
])
678 rdt
+= s
->mac_reg
[RDLEN
] / sizeof(desc
);
679 if (((rdt
- s
->mac_reg
[RDH
]) * sizeof(desc
)) <= s
->mac_reg
[RDLEN
] >>
681 n
|= E1000_ICS_RXDMT0
;
689 mac_readreg(E1000State
*s
, int index
)
691 return s
->mac_reg
[index
];
695 mac_icr_read(E1000State
*s
, int index
)
697 uint32_t ret
= s
->mac_reg
[ICR
];
699 DBGOUT(INTERRUPT
, "ICR read: %x\n", ret
);
700 set_interrupt_cause(s
, 0, 0);
705 mac_read_clr4(E1000State
*s
, int index
)
707 uint32_t ret
= s
->mac_reg
[index
];
709 s
->mac_reg
[index
] = 0;
714 mac_read_clr8(E1000State
*s
, int index
)
716 uint32_t ret
= s
->mac_reg
[index
];
718 s
->mac_reg
[index
] = 0;
719 s
->mac_reg
[index
-1] = 0;
724 mac_writereg(E1000State
*s
, int index
, uint32_t val
)
726 s
->mac_reg
[index
] = val
;
730 set_rdt(E1000State
*s
, int index
, uint32_t val
)
733 s
->mac_reg
[index
] = val
& 0xffff;
737 set_16bit(E1000State
*s
, int index
, uint32_t val
)
739 s
->mac_reg
[index
] = val
& 0xffff;
743 set_dlen(E1000State
*s
, int index
, uint32_t val
)
745 s
->mac_reg
[index
] = val
& 0xfff80;
749 set_tctl(E1000State
*s
, int index
, uint32_t val
)
751 s
->mac_reg
[index
] = val
;
752 s
->mac_reg
[TDT
] &= 0xffff;
757 set_icr(E1000State
*s
, int index
, uint32_t val
)
759 DBGOUT(INTERRUPT
, "set_icr %x\n", val
);
760 set_interrupt_cause(s
, 0, s
->mac_reg
[ICR
] & ~val
);
764 set_imc(E1000State
*s
, int index
, uint32_t val
)
766 s
->mac_reg
[IMS
] &= ~val
;
771 set_ims(E1000State
*s
, int index
, uint32_t val
)
773 s
->mac_reg
[IMS
] |= val
;
777 #define getreg(x) [x] = mac_readreg
778 static uint32_t (*macreg_readops
[])(E1000State
*, int) = {
779 getreg(PBA
), getreg(RCTL
), getreg(TDH
), getreg(TXDCTL
),
780 getreg(WUFC
), getreg(TDT
), getreg(CTRL
), getreg(LEDCTL
),
781 getreg(MANC
), getreg(MDIC
), getreg(SWSM
), getreg(STATUS
),
782 getreg(TORL
), getreg(TOTL
), getreg(IMS
), getreg(TCTL
),
783 getreg(RDH
), getreg(RDT
), getreg(VET
),
785 [TOTH
] = mac_read_clr8
, [TORH
] = mac_read_clr8
, [GPRC
] = mac_read_clr4
,
786 [GPTC
] = mac_read_clr4
, [TPR
] = mac_read_clr4
, [TPT
] = mac_read_clr4
,
787 [ICR
] = mac_icr_read
, [EECD
] = get_eecd
, [EERD
] = flash_eerd_read
,
788 [CRCERRS
... MPC
] = &mac_readreg
,
789 [RA
... RA
+31] = &mac_readreg
,
790 [MTA
... MTA
+127] = &mac_readreg
,
791 [VFTA
... VFTA
+127] = &mac_readreg
,
793 enum { NREADOPS
= ARRAY_SIZE(macreg_readops
) };
795 #define putreg(x) [x] = mac_writereg
796 static void (*macreg_writeops
[])(E1000State
*, int, uint32_t) = {
797 putreg(PBA
), putreg(EERD
), putreg(SWSM
), putreg(WUFC
),
798 putreg(TDBAL
), putreg(TDBAH
), putreg(TXDCTL
), putreg(RDBAH
),
799 putreg(RDBAL
), putreg(LEDCTL
), putreg(VET
),
800 [TDLEN
] = set_dlen
, [RDLEN
] = set_dlen
, [TCTL
] = set_tctl
,
801 [TDT
] = set_tctl
, [MDIC
] = set_mdic
, [ICS
] = set_ics
,
802 [TDH
] = set_16bit
, [RDH
] = set_16bit
, [RDT
] = set_rdt
,
803 [IMC
] = set_imc
, [IMS
] = set_ims
, [ICR
] = set_icr
,
804 [EECD
] = set_eecd
, [RCTL
] = set_rx_control
, [CTRL
] = set_ctrl
,
805 [RA
... RA
+31] = &mac_writereg
,
806 [MTA
... MTA
+127] = &mac_writereg
,
807 [VFTA
... VFTA
+127] = &mac_writereg
,
809 enum { NWRITEOPS
= ARRAY_SIZE(macreg_writeops
) };
812 e1000_mmio_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
814 E1000State
*s
= opaque
;
815 unsigned int index
= (addr
& 0x1ffff) >> 2;
817 #ifdef TARGET_WORDS_BIGENDIAN
820 if (index
< NWRITEOPS
&& macreg_writeops
[index
])
821 macreg_writeops
[index
](s
, index
, val
);
822 else if (index
< NREADOPS
&& macreg_readops
[index
])
823 DBGOUT(MMIO
, "e1000_mmio_writel RO %x: 0x%04x\n", index
<<2, val
);
825 DBGOUT(UNKNOWN
, "MMIO unknown write addr=0x%08x,val=0x%08x\n",
830 e1000_mmio_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
832 // emulate hw without byte enables: no RMW
833 e1000_mmio_writel(opaque
, addr
& ~3,
834 (val
& 0xffff) << (8*(addr
& 3)));
838 e1000_mmio_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
840 // emulate hw without byte enables: no RMW
841 e1000_mmio_writel(opaque
, addr
& ~3,
842 (val
& 0xff) << (8*(addr
& 3)));
846 e1000_mmio_readl(void *opaque
, target_phys_addr_t addr
)
848 E1000State
*s
= opaque
;
849 unsigned int index
= (addr
& 0x1ffff) >> 2;
851 if (index
< NREADOPS
&& macreg_readops
[index
])
853 uint32_t val
= macreg_readops
[index
](s
, index
);
854 #ifdef TARGET_WORDS_BIGENDIAN
859 DBGOUT(UNKNOWN
, "MMIO unknown read addr=0x%08x\n", index
<<2);
864 e1000_mmio_readb(void *opaque
, target_phys_addr_t addr
)
866 return ((e1000_mmio_readl(opaque
, addr
& ~3)) >>
867 (8 * (addr
& 3))) & 0xff;
871 e1000_mmio_readw(void *opaque
, target_phys_addr_t addr
)
873 return ((e1000_mmio_readl(opaque
, addr
& ~3)) >>
874 (8 * (addr
& 3))) & 0xffff;
877 static const int mac_regtosave
[] = {
878 CTRL
, EECD
, EERD
, GPRC
, GPTC
, ICR
, ICS
, IMC
, IMS
,
879 LEDCTL
, MANC
, MDIC
, MPC
, PBA
, RCTL
, RDBAH
, RDBAL
, RDH
,
880 RDLEN
, RDT
, STATUS
, SWSM
, TCTL
, TDBAH
, TDBAL
, TDH
, TDLEN
,
881 TDT
, TORH
, TORL
, TOTH
, TOTL
, TPR
, TPT
, TXDCTL
, WUFC
,
884 enum { MAC_NSAVE
= ARRAY_SIZE(mac_regtosave
) };
886 static const struct {
889 } mac_regarraystosave
[] = { {32, RA
}, {128, MTA
}, {128, VFTA
} };
890 enum { MAC_NARRAYS
= ARRAY_SIZE(mac_regarraystosave
) };
893 nic_save(QEMUFile
*f
, void *opaque
)
895 E1000State
*s
= (E1000State
*)opaque
;
898 pci_device_save(&s
->dev
, f
);
900 qemu_put_be32s(f
, &s
->rxbuf_size
);
901 qemu_put_be32s(f
, &s
->rxbuf_min_shift
);
902 qemu_put_be32s(f
, &s
->eecd_state
.val_in
);
903 qemu_put_be16s(f
, &s
->eecd_state
.bitnum_in
);
904 qemu_put_be16s(f
, &s
->eecd_state
.bitnum_out
);
905 qemu_put_be16s(f
, &s
->eecd_state
.reading
);
906 qemu_put_be32s(f
, &s
->eecd_state
.old_eecd
);
907 qemu_put_8s(f
, &s
->tx
.ipcss
);
908 qemu_put_8s(f
, &s
->tx
.ipcso
);
909 qemu_put_be16s(f
, &s
->tx
.ipcse
);
910 qemu_put_8s(f
, &s
->tx
.tucss
);
911 qemu_put_8s(f
, &s
->tx
.tucso
);
912 qemu_put_be16s(f
, &s
->tx
.tucse
);
913 qemu_put_be32s(f
, &s
->tx
.paylen
);
914 qemu_put_8s(f
, &s
->tx
.hdr_len
);
915 qemu_put_be16s(f
, &s
->tx
.mss
);
916 qemu_put_be16s(f
, &s
->tx
.size
);
917 qemu_put_be16s(f
, &s
->tx
.tso_frames
);
918 qemu_put_8s(f
, &s
->tx
.sum_needed
);
919 qemu_put_s8s(f
, &s
->tx
.ip
);
920 qemu_put_s8s(f
, &s
->tx
.tcp
);
921 qemu_put_buffer(f
, s
->tx
.header
, sizeof s
->tx
.header
);
922 qemu_put_buffer(f
, s
->tx
.data
, sizeof s
->tx
.data
);
923 for (i
= 0; i
< 64; i
++)
924 qemu_put_be16s(f
, s
->eeprom_data
+ i
);
925 for (i
= 0; i
< 0x20; i
++)
926 qemu_put_be16s(f
, s
->phy_reg
+ i
);
927 for (i
= 0; i
< MAC_NSAVE
; i
++)
928 qemu_put_be32s(f
, s
->mac_reg
+ mac_regtosave
[i
]);
929 for (i
= 0; i
< MAC_NARRAYS
; i
++)
930 for (j
= 0; j
< mac_regarraystosave
[i
].size
; j
++)
932 s
->mac_reg
+ mac_regarraystosave
[i
].array0
+ j
);
936 nic_load(QEMUFile
*f
, void *opaque
, int version_id
)
938 E1000State
*s
= (E1000State
*)opaque
;
941 if ((ret
= pci_device_load(&s
->dev
, f
)) < 0)
944 qemu_get_sbe32s(f
, &i
); /* once some unused instance id */
945 qemu_get_be32(f
); /* Ignored. Was mmio_base. */
946 qemu_get_be32s(f
, &s
->rxbuf_size
);
947 qemu_get_be32s(f
, &s
->rxbuf_min_shift
);
948 qemu_get_be32s(f
, &s
->eecd_state
.val_in
);
949 qemu_get_be16s(f
, &s
->eecd_state
.bitnum_in
);
950 qemu_get_be16s(f
, &s
->eecd_state
.bitnum_out
);
951 qemu_get_be16s(f
, &s
->eecd_state
.reading
);
952 qemu_get_be32s(f
, &s
->eecd_state
.old_eecd
);
953 qemu_get_8s(f
, &s
->tx
.ipcss
);
954 qemu_get_8s(f
, &s
->tx
.ipcso
);
955 qemu_get_be16s(f
, &s
->tx
.ipcse
);
956 qemu_get_8s(f
, &s
->tx
.tucss
);
957 qemu_get_8s(f
, &s
->tx
.tucso
);
958 qemu_get_be16s(f
, &s
->tx
.tucse
);
959 qemu_get_be32s(f
, &s
->tx
.paylen
);
960 qemu_get_8s(f
, &s
->tx
.hdr_len
);
961 qemu_get_be16s(f
, &s
->tx
.mss
);
962 qemu_get_be16s(f
, &s
->tx
.size
);
963 qemu_get_be16s(f
, &s
->tx
.tso_frames
);
964 qemu_get_8s(f
, &s
->tx
.sum_needed
);
965 qemu_get_s8s(f
, &s
->tx
.ip
);
966 qemu_get_s8s(f
, &s
->tx
.tcp
);
967 qemu_get_buffer(f
, s
->tx
.header
, sizeof s
->tx
.header
);
968 qemu_get_buffer(f
, s
->tx
.data
, sizeof s
->tx
.data
);
969 for (i
= 0; i
< 64; i
++)
970 qemu_get_be16s(f
, s
->eeprom_data
+ i
);
971 for (i
= 0; i
< 0x20; i
++)
972 qemu_get_be16s(f
, s
->phy_reg
+ i
);
973 for (i
= 0; i
< MAC_NSAVE
; i
++)
974 qemu_get_be32s(f
, s
->mac_reg
+ mac_regtosave
[i
]);
975 for (i
= 0; i
< MAC_NARRAYS
; i
++)
976 for (j
= 0; j
< mac_regarraystosave
[i
].size
; j
++)
978 s
->mac_reg
+ mac_regarraystosave
[i
].array0
+ j
);
982 static const uint16_t e1000_eeprom_template
[64] = {
983 0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
984 0x3000, 0x1000, 0x6403, E1000_DEVID
, 0x8086, E1000_DEVID
, 0x8086, 0x3040,
985 0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
986 0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
987 0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
988 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
989 0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
990 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
993 static const uint16_t phy_reg_init
[] = {
994 [PHY_CTRL
] = 0x1140, [PHY_STATUS
] = 0x796d, // link initially up
995 [PHY_ID1
] = 0x141, [PHY_ID2
] = PHY_ID2_INIT
,
996 [PHY_1000T_CTRL
] = 0x0e00, [M88E1000_PHY_SPEC_CTRL
] = 0x360,
997 [M88E1000_EXT_PHY_SPEC_CTRL
] = 0x0d60, [PHY_AUTONEG_ADV
] = 0xde1,
998 [PHY_LP_ABILITY
] = 0x1e0, [PHY_1000T_STATUS
] = 0x3c00,
999 [M88E1000_PHY_SPEC_STATUS
] = 0xac00,
1002 static const uint32_t mac_reg_init
[] = {
1005 [CTRL
] = E1000_CTRL_SWDPIN2
| E1000_CTRL_SWDPIN0
|
1006 E1000_CTRL_SPD_1000
| E1000_CTRL_SLU
,
1007 [STATUS
] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE
|
1008 E1000_STATUS_ASDV
| E1000_STATUS_MTXCKOK
|
1009 E1000_STATUS_SPEED_1000
| E1000_STATUS_FD
|
1011 [MANC
] = E1000_MANC_EN_MNG2HOST
| E1000_MANC_RCV_TCO_EN
|
1012 E1000_MANC_ARP_EN
| E1000_MANC_0298_EN
|
1018 static CPUWriteMemoryFunc
*e1000_mmio_write
[] = {
1019 e1000_mmio_writeb
, e1000_mmio_writew
, e1000_mmio_writel
1022 static CPUReadMemoryFunc
*e1000_mmio_read
[] = {
1023 e1000_mmio_readb
, e1000_mmio_readw
, e1000_mmio_readl
1027 e1000_mmio_map(PCIDevice
*pci_dev
, int region_num
,
1028 uint32_t addr
, uint32_t size
, int type
)
1030 E1000State
*d
= (E1000State
*)pci_dev
;
1032 const uint32_t excluded_regs
[] = {
1033 E1000_MDIC
, E1000_ICR
, E1000_ICS
, E1000_IMS
,
1034 E1000_IMC
, E1000_TCTL
, E1000_TDT
, PNPMMIO_SIZE
1038 DBGOUT(MMIO
, "e1000_mmio_map addr=0x%08x 0x%08x\n", addr
, size
);
1040 cpu_register_physical_memory(addr
, PNPMMIO_SIZE
, d
->mmio_index
);
1041 qemu_register_coalesced_mmio(addr
, excluded_regs
[0]);
1043 for (i
= 0; excluded_regs
[i
] != PNPMMIO_SIZE
; i
++)
1044 qemu_register_coalesced_mmio(addr
+ excluded_regs
[i
] + 4,
1045 excluded_regs
[i
+ 1] -
1046 excluded_regs
[i
] - 4);
1050 e1000_cleanup(VLANClientState
*vc
)
1052 E1000State
*d
= vc
->opaque
;
1054 unregister_savevm("e1000", d
);
1058 pci_e1000_uninit(PCIDevice
*dev
)
1060 E1000State
*d
= (E1000State
*) dev
;
1062 cpu_unregister_io_memory(d
->mmio_index
);
1067 static void e1000_reset(void *opaque
)
1069 E1000State
*d
= opaque
;
1071 memset(d
->phy_reg
, 0, sizeof d
->phy_reg
);
1072 memmove(d
->phy_reg
, phy_reg_init
, sizeof phy_reg_init
);
1073 memset(d
->mac_reg
, 0, sizeof d
->mac_reg
);
1074 memmove(d
->mac_reg
, mac_reg_init
, sizeof mac_reg_init
);
1075 d
->rxbuf_min_shift
= 1;
1076 memset(&d
->tx
, 0, sizeof d
->tx
);
1079 static void pci_e1000_init(PCIDevice
*pci_dev
)
1081 E1000State
*d
= (E1000State
*)pci_dev
;
1083 uint16_t checksum
= 0;
1084 static const char info_str
[] = "e1000";
1088 pci_conf
= d
->dev
.config
;
1090 pci_config_set_vendor_id(pci_conf
, PCI_VENDOR_ID_INTEL
);
1091 pci_config_set_device_id(pci_conf
, E1000_DEVID
);
1092 *(uint16_t *)(pci_conf
+0x04) = cpu_to_le16(0x0407);
1093 *(uint16_t *)(pci_conf
+0x06) = cpu_to_le16(0x0010);
1094 pci_conf
[0x08] = 0x03;
1095 pci_config_set_class(pci_conf
, PCI_CLASS_NETWORK_ETHERNET
);
1096 pci_conf
[0x0c] = 0x10;
1098 pci_conf
[0x3d] = 1; // interrupt pin 0
1100 d
->mmio_index
= cpu_register_io_memory(e1000_mmio_read
,
1101 e1000_mmio_write
, d
);
1103 pci_register_bar((PCIDevice
*)d
, 0, PNPMMIO_SIZE
,
1104 PCI_ADDRESS_SPACE_MEM
, e1000_mmio_map
);
1106 pci_register_bar((PCIDevice
*)d
, 1, IOPORT_SIZE
,
1107 PCI_ADDRESS_SPACE_IO
, ioport_map
);
1109 memmove(d
->eeprom_data
, e1000_eeprom_template
,
1110 sizeof e1000_eeprom_template
);
1111 qdev_get_macaddr(&d
->dev
.qdev
, macaddr
);
1112 for (i
= 0; i
< 3; i
++)
1113 d
->eeprom_data
[i
] = (macaddr
[2*i
+1]<<8) | macaddr
[2*i
];
1114 for (i
= 0; i
< EEPROM_CHECKSUM_REG
; i
++)
1115 checksum
+= d
->eeprom_data
[i
];
1116 checksum
= (uint16_t) EEPROM_SUM
- checksum
;
1117 d
->eeprom_data
[EEPROM_CHECKSUM_REG
] = checksum
;
1119 d
->vc
= qdev_get_vlan_client(&d
->dev
.qdev
,
1120 e1000_can_receive
, e1000_receive
,
1121 NULL
, e1000_cleanup
, d
);
1122 d
->vc
->link_status_changed
= e1000_set_link_status
;
1124 qemu_format_nic_info_str(d
->vc
, macaddr
);
1126 register_savevm(info_str
, -1, 2, nic_save
, nic_load
, d
);
1127 d
->dev
.unregister
= pci_e1000_uninit
;
1128 qemu_register_reset(e1000_reset
, d
);
1132 static PCIDeviceInfo e1000_info
= {
1133 .qdev
.name
= "e1000",
1134 .qdev
.size
= sizeof(E1000State
),
1135 .init
= pci_e1000_init
,
1138 static void e1000_register_devices(void)
1140 pci_qdev_register(&e1000_info
);
1143 device_init(e1000_register_devices
)