2 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
4 * Copyright (c) 2001-2014, Intel Corporation
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
34 * Copyright (c) 2005 The DragonFly Project. All rights reserved.
36 * This code is derived from software contributed to The DragonFly Project
37 * by Matthew Dillon <dillon@backplane.com>
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
49 * 3. Neither the name of The DragonFly Project nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific, prior written permission.
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
56 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
57 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
59 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
60 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
61 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
62 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
63 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68 * SERIALIZATION API RULES:
70 * - We must call lwkt_serialize_handler_enable() prior to enabling the
71 * hardware interrupt and lwkt_serialize_handler_disable() after disabling
72 * the hardware interrupt in order to avoid handler execution races from
73 * scheduled interrupt threads.
76 #include "opt_ifpoll.h"
78 #include <sys/param.h>
80 #include <sys/endian.h>
81 #include <sys/interrupt.h>
82 #include <sys/kernel.h>
84 #include <sys/malloc.h>
88 #include <sys/serialize.h>
89 #include <sys/socket.h>
90 #include <sys/sockio.h>
91 #include <sys/sysctl.h>
92 #include <sys/systm.h>
95 #include <net/ethernet.h>
97 #include <net/if_arp.h>
98 #include <net/if_dl.h>
99 #include <net/if_media.h>
100 #include <net/if_poll.h>
101 #include <net/ifq_var.h>
102 #include <net/vlan/if_vlan_var.h>
103 #include <net/vlan/if_vlan_ether.h>
105 #include <netinet/ip.h>
106 #include <netinet/tcp.h>
107 #include <netinet/udp.h>
109 #include <bus/pci/pcivar.h>
110 #include <bus/pci/pcireg.h>
112 #include <dev/netif/ig_hal/e1000_api.h>
113 #include <dev/netif/ig_hal/e1000_82571.h>
114 #include <dev/netif/ig_hal/e1000_dragonfly.h>
115 #include <dev/netif/em/if_em.h>
/*
 * Device-table helper macros.
 *
 * NOTE(review): this chunk is a damaged extraction -- each code line
 * below still carries its original source line number and some original
 * lines were dropped; the code lines are preserved byte-for-byte.
 */
/* Driver identification strings used to compose probe descriptions. */
119 #define EM_NAME "Intel(R) PRO/1000 Network Connection "
120 #define EM_VER " 7.4.2"
/*
 * Expand one em_vendor_info table entry:
 * { vendor id, device id, probe return/priority value, description }.
 * The description is built as EM_NAME + stringified device id + EM_VER.
 */
122 #define _EM_DEVICE(id, ret) \
123 { EM_VENDOR_ID, E1000_DEV_ID_##id, ret, EM_NAME #id EM_VER }
/*
 * Entry probed with value -100 -- presumably a lower priority so a more
 * capable driver (emx(4)?) can claim the device first; TODO confirm
 * against the bus probe-priority convention.
 */
124 #define EM_EMX_DEVICE(id) _EM_DEVICE(id, -100)
/* Entry probed with the default success value 0. */
125 #define EM_DEVICE(id) _EM_DEVICE(id, 0)
/* All-zero sentinel terminating em_vendor_info_array (desc == NULL). */
126 #define EM_DEVICE_NULL { 0, 0, 0, NULL }
128 static const struct em_vendor_info em_vendor_info_array
[] = {
130 EM_DEVICE(82540EM_LOM
),
132 EM_DEVICE(82540EP_LOM
),
133 EM_DEVICE(82540EP_LP
),
137 EM_DEVICE(82541ER_LOM
),
138 EM_DEVICE(82541EI_MOBILE
),
140 EM_DEVICE(82541GI_LF
),
141 EM_DEVICE(82541GI_MOBILE
),
145 EM_DEVICE(82543GC_FIBER
),
146 EM_DEVICE(82543GC_COPPER
),
148 EM_DEVICE(82544EI_COPPER
),
149 EM_DEVICE(82544EI_FIBER
),
150 EM_DEVICE(82544GC_COPPER
),
151 EM_DEVICE(82544GC_LOM
),
153 EM_DEVICE(82545EM_COPPER
),
154 EM_DEVICE(82545EM_FIBER
),
155 EM_DEVICE(82545GM_COPPER
),
156 EM_DEVICE(82545GM_FIBER
),
157 EM_DEVICE(82545GM_SERDES
),
159 EM_DEVICE(82546EB_COPPER
),
160 EM_DEVICE(82546EB_FIBER
),
161 EM_DEVICE(82546EB_QUAD_COPPER
),
162 EM_DEVICE(82546GB_COPPER
),
163 EM_DEVICE(82546GB_FIBER
),
164 EM_DEVICE(82546GB_SERDES
),
165 EM_DEVICE(82546GB_PCIE
),
166 EM_DEVICE(82546GB_QUAD_COPPER
),
167 EM_DEVICE(82546GB_QUAD_COPPER_KSP3
),
170 EM_DEVICE(82547EI_MOBILE
),
173 EM_EMX_DEVICE(82571EB_COPPER
),
174 EM_EMX_DEVICE(82571EB_FIBER
),
175 EM_EMX_DEVICE(82571EB_SERDES
),
176 EM_EMX_DEVICE(82571EB_SERDES_DUAL
),
177 EM_EMX_DEVICE(82571EB_SERDES_QUAD
),
178 EM_EMX_DEVICE(82571EB_QUAD_COPPER
),
179 EM_EMX_DEVICE(82571EB_QUAD_COPPER_BP
),
180 EM_EMX_DEVICE(82571EB_QUAD_COPPER_LP
),
181 EM_EMX_DEVICE(82571EB_QUAD_FIBER
),
182 EM_EMX_DEVICE(82571PT_QUAD_COPPER
),
184 EM_EMX_DEVICE(82572EI_COPPER
),
185 EM_EMX_DEVICE(82572EI_FIBER
),
186 EM_EMX_DEVICE(82572EI_SERDES
),
187 EM_EMX_DEVICE(82572EI
),
189 EM_EMX_DEVICE(82573E
),
190 EM_EMX_DEVICE(82573E_IAMT
),
191 EM_EMX_DEVICE(82573L),
195 EM_EMX_DEVICE(80003ES2LAN_COPPER_SPT
),
196 EM_EMX_DEVICE(80003ES2LAN_SERDES_SPT
),
197 EM_EMX_DEVICE(80003ES2LAN_COPPER_DPT
),
198 EM_EMX_DEVICE(80003ES2LAN_SERDES_DPT
),
200 EM_DEVICE(ICH8_IGP_M_AMT
),
201 EM_DEVICE(ICH8_IGP_AMT
),
202 EM_DEVICE(ICH8_IGP_C
),
204 EM_DEVICE(ICH8_IFE_GT
),
205 EM_DEVICE(ICH8_IFE_G
),
206 EM_DEVICE(ICH8_IGP_M
),
207 EM_DEVICE(ICH8_82567V_3
),
209 EM_DEVICE(ICH9_IGP_M_AMT
),
210 EM_DEVICE(ICH9_IGP_AMT
),
211 EM_DEVICE(ICH9_IGP_C
),
212 EM_DEVICE(ICH9_IGP_M
),
213 EM_DEVICE(ICH9_IGP_M_V
),
215 EM_DEVICE(ICH9_IFE_GT
),
216 EM_DEVICE(ICH9_IFE_G
),
219 EM_EMX_DEVICE(82574L),
220 EM_EMX_DEVICE(82574LA
),
222 EM_DEVICE(ICH10_R_BM_LM
),
223 EM_DEVICE(ICH10_R_BM_LF
),
224 EM_DEVICE(ICH10_R_BM_V
),
225 EM_DEVICE(ICH10_D_BM_LM
),
226 EM_DEVICE(ICH10_D_BM_LF
),
227 EM_DEVICE(ICH10_D_BM_V
),
229 EM_DEVICE(PCH_M_HV_LM
),
230 EM_DEVICE(PCH_M_HV_LC
),
231 EM_DEVICE(PCH_D_HV_DM
),
232 EM_DEVICE(PCH_D_HV_DC
),
234 EM_DEVICE(PCH2_LV_LM
),
235 EM_DEVICE(PCH2_LV_V
),
237 EM_EMX_DEVICE(PCH_LPT_I217_LM
),
238 EM_EMX_DEVICE(PCH_LPT_I217_V
),
239 EM_EMX_DEVICE(PCH_LPTLP_I218_LM
),
240 EM_EMX_DEVICE(PCH_LPTLP_I218_V
),
241 EM_EMX_DEVICE(PCH_I218_LM2
),
242 EM_EMX_DEVICE(PCH_I218_V2
),
243 EM_EMX_DEVICE(PCH_I218_LM3
),
244 EM_EMX_DEVICE(PCH_I218_V3
),
245 EM_EMX_DEVICE(PCH_SPT_I219_LM
),
246 EM_EMX_DEVICE(PCH_SPT_I219_V
),
247 EM_EMX_DEVICE(PCH_SPT_I219_LM2
),
248 EM_EMX_DEVICE(PCH_SPT_I219_V2
),
250 /* required last entry */
254 static int em_probe(device_t
);
255 static int em_attach(device_t
);
256 static int em_detach(device_t
);
257 static int em_shutdown(device_t
);
258 static int em_suspend(device_t
);
259 static int em_resume(device_t
);
261 static void em_init(void *);
262 static void em_stop(struct adapter
*);
263 static int em_ioctl(struct ifnet
*, u_long
, caddr_t
, struct ucred
*);
264 static void em_start(struct ifnet
*, struct ifaltq_subque
*);
266 static void em_npoll(struct ifnet
*, struct ifpoll_info
*);
267 static void em_npoll_compat(struct ifnet
*, void *, int);
269 static void em_watchdog(struct ifnet
*);
270 static void em_media_status(struct ifnet
*, struct ifmediareq
*);
271 static int em_media_change(struct ifnet
*);
272 static void em_timer(void *);
274 static void em_intr(void *);
275 static void em_intr_mask(void *);
276 static void em_intr_body(struct adapter
*, boolean_t
);
277 static void em_rxeof(struct adapter
*, int);
278 static void em_txeof(struct adapter
*);
279 static void em_tx_collect(struct adapter
*);
280 static void em_tx_purge(struct adapter
*);
281 static void em_enable_intr(struct adapter
*);
282 static void em_disable_intr(struct adapter
*);
284 static int em_dma_malloc(struct adapter
*, bus_size_t
,
285 struct em_dma_alloc
*);
286 static void em_dma_free(struct adapter
*, struct em_dma_alloc
*);
287 static void em_init_tx_ring(struct adapter
*);
288 static int em_init_rx_ring(struct adapter
*);
289 static int em_create_tx_ring(struct adapter
*);
290 static int em_create_rx_ring(struct adapter
*);
291 static void em_destroy_tx_ring(struct adapter
*, int);
292 static void em_destroy_rx_ring(struct adapter
*, int);
293 static int em_newbuf(struct adapter
*, int, int);
294 static int em_encap(struct adapter
*, struct mbuf
**, int *, int *);
295 static void em_rxcsum(struct adapter
*, struct e1000_rx_desc
*,
297 static int em_txcsum(struct adapter
*, struct mbuf
*,
298 uint32_t *, uint32_t *);
299 static int em_tso_pullup(struct adapter
*, struct mbuf
**);
300 static int em_tso_setup(struct adapter
*, struct mbuf
*,
301 uint32_t *, uint32_t *);
303 static int em_get_hw_info(struct adapter
*);
304 static int em_is_valid_eaddr(const uint8_t *);
305 static int em_alloc_pci_res(struct adapter
*);
306 static void em_free_pci_res(struct adapter
*);
307 static int em_reset(struct adapter
*);
308 static void em_setup_ifp(struct adapter
*);
309 static void em_init_tx_unit(struct adapter
*);
310 static void em_init_rx_unit(struct adapter
*);
311 static void em_update_stats(struct adapter
*);
312 static void em_set_promisc(struct adapter
*);
313 static void em_disable_promisc(struct adapter
*);
314 static void em_set_multi(struct adapter
*);
315 static void em_update_link_status(struct adapter
*);
316 static void em_smartspeed(struct adapter
*);
317 static void em_set_itr(struct adapter
*, uint32_t);
318 static void em_disable_aspm(struct adapter
*);
320 /* Hardware workarounds */
321 static int em_82547_fifo_workaround(struct adapter
*, int);
322 static void em_82547_update_fifo_head(struct adapter
*, int);
323 static int em_82547_tx_fifo_reset(struct adapter
*);
324 static void em_82547_move_tail(void *);
325 static void em_82547_move_tail_serialized(struct adapter
*);
326 static uint32_t em_82544_fill_desc(bus_addr_t
, uint32_t, PDESC_ARRAY
);
328 static void em_print_debug_info(struct adapter
*);
329 static void em_print_nvm_info(struct adapter
*);
330 static void em_print_hw_stats(struct adapter
*);
332 static int em_sysctl_stats(SYSCTL_HANDLER_ARGS
);
333 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS
);
334 static int em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS
);
335 static int em_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS
);
336 static void em_add_sysctl(struct adapter
*adapter
);
338 /* Management and WOL Support */
339 static void em_get_mgmt(struct adapter
*);
340 static void em_rel_mgmt(struct adapter
*);
341 static void em_get_hw_control(struct adapter
*);
342 static void em_rel_hw_control(struct adapter
*);
343 static void em_enable_wol(device_t
);
345 static device_method_t em_methods
[] = {
346 /* Device interface */
347 DEVMETHOD(device_probe
, em_probe
),
348 DEVMETHOD(device_attach
, em_attach
),
349 DEVMETHOD(device_detach
, em_detach
),
350 DEVMETHOD(device_shutdown
, em_shutdown
),
351 DEVMETHOD(device_suspend
, em_suspend
),
352 DEVMETHOD(device_resume
, em_resume
),
356 static driver_t em_driver
= {
359 sizeof(struct adapter
),
362 static devclass_t em_devclass
;
364 DECLARE_DUMMY_MODULE(if_em
);
365 MODULE_DEPEND(em
, ig_hal
, 1, 1, 1);
366 DRIVER_MODULE(if_em
, pci
, em_driver
, em_devclass
, NULL
, NULL
);
/*
 * Module tunables, settable from the loader environment via the
 * TUNABLE_* hooks below.
 *
 * NOTE(review): this chunk is a damaged extraction -- each declaration
 * still carries its original source line number and is soft-wrapped
 * across lines; code lines are preserved byte-for-byte.
 */
/*
 * Interrupt throttle ceiling (hw.em.int_throttle_ceil).  A value of 0
 * skips the ITR recalculation in em_attach(); see the throttle setup
 * code there.  Default is EM_DEFAULT_ITR.
 */
371 static int em_int_throttle_ceil
= EM_DEFAULT_ITR
;
/*
 * Requested RX/TX descriptor ring sizes (hw.em.rxd / hw.em.txd).
 * em_attach() validates these against per-MAC limits and EM_DBA_ALIGN
 * and falls back to safe defaults if out of range.
 */
372 static int em_rxd
= EM_DEFAULT_RXD
;
373 static int em_txd
= EM_DEFAULT_TXD
;
/* PHY smart power-down enable (hw.em.smart_pwr_down); off by default. */
374 static int em_smart_pwr_down
= 0;
376 /* Controls whether promiscuous also shows bad packets */
377 static int em_debug_sbp
= FALSE
;
/* Enable the 82573 ASPM/jumbo workaround (hw.em.82573_workaround). */
379 static int em_82573_workaround
= 1;
/* Allow MSI interrupt allocation (hw.em.msi.enable). */
380 static int em_msi_enable
= 1;
/*
 * Default flow-control setting string (hw.em.flow_ctrl); RX PAUSE only
 * by default.  Parsed with ifmedia_str2ethfc() in em_attach().
 */
382 static char em_flowctrl
[IFM_ETH_FC_STRLEN
] = IFM_ETH_FC_RXPAUSE
;
/* Loader-environment hooks for the tunables declared above. */
384 TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil
);
385 TUNABLE_INT("hw.em.rxd", &em_rxd
);
386 TUNABLE_INT("hw.em.txd", &em_txd
);
387 TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down
);
388 TUNABLE_INT("hw.em.sbp", &em_debug_sbp
);
389 TUNABLE_INT("hw.em.82573_workaround", &em_82573_workaround
);
390 TUNABLE_INT("hw.em.msi.enable", &em_msi_enable
);
391 TUNABLE_STR("hw.em.flow_ctrl", em_flowctrl
, sizeof(em_flowctrl
));
393 /* Global used in WOL setup with multiport cards */
/* Counts which port of a quad-port adapter is being attached; reset to
 * 0 after the fourth port so multiple quad-port cards work. */
394 static int em_global_quad_port_a
= 0;
396 /* Set this to one to display debug statistics */
397 static int em_display_debug_stats
= 0;
399 #if !defined(KTR_IF_EM)
400 #define KTR_IF_EM KTR_ALL
402 KTR_INFO_MASTER(if_em
);
403 KTR_INFO(KTR_IF_EM
, if_em
, intr_beg
, 0, "intr begin");
404 KTR_INFO(KTR_IF_EM
, if_em
, intr_end
, 1, "intr end");
405 KTR_INFO(KTR_IF_EM
, if_em
, pkt_receive
, 4, "rx packet");
406 KTR_INFO(KTR_IF_EM
, if_em
, pkt_txqueue
, 5, "tx packet");
407 KTR_INFO(KTR_IF_EM
, if_em
, pkt_txclean
, 6, "tx clean");
408 #define logif(name) KTR_LOG(if_em_ ## name)
411 em_probe(device_t dev
)
413 const struct em_vendor_info
*ent
;
416 vid
= pci_get_vendor(dev
);
417 did
= pci_get_device(dev
);
419 for (ent
= em_vendor_info_array
; ent
->desc
!= NULL
; ++ent
) {
420 if (vid
== ent
->vendor_id
&& did
== ent
->device_id
) {
421 device_set_desc(dev
, ent
->desc
);
422 device_set_async_attach(dev
, TRUE
);
430 em_attach(device_t dev
)
432 struct adapter
*adapter
= device_get_softc(dev
);
433 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
436 uint16_t eeprom_data
, device_id
, apme_mask
;
437 driver_intr_t
*intr_func
;
438 char flowctrl
[IFM_ETH_FC_STRLEN
];
440 adapter
->dev
= adapter
->osdep
.dev
= dev
;
442 callout_init_mp(&adapter
->timer
);
443 callout_init_mp(&adapter
->tx_fifo_timer
);
445 ifmedia_init(&adapter
->media
, IFM_IMASK
| IFM_ETH_FCMASK
,
446 em_media_change
, em_media_status
);
448 /* Determine hardware and mac info */
449 error
= em_get_hw_info(adapter
);
451 device_printf(dev
, "Identify hardware failed\n");
455 /* Setup PCI resources */
456 error
= em_alloc_pci_res(adapter
);
458 device_printf(dev
, "Allocation of PCI resources failed\n");
463 * For ICH8 and family we need to map the flash memory,
464 * and this must happen after the MAC is identified.
466 * (SPT does not map the flash with a separate BAR)
468 if (adapter
->hw
.mac
.type
== e1000_ich8lan
||
469 adapter
->hw
.mac
.type
== e1000_ich9lan
||
470 adapter
->hw
.mac
.type
== e1000_ich10lan
||
471 adapter
->hw
.mac
.type
== e1000_pchlan
||
472 adapter
->hw
.mac
.type
== e1000_pch2lan
||
473 adapter
->hw
.mac
.type
== e1000_pch_lpt
) {
474 adapter
->flash_rid
= EM_BAR_FLASH
;
476 adapter
->flash
= bus_alloc_resource_any(dev
, SYS_RES_MEMORY
,
477 &adapter
->flash_rid
, RF_ACTIVE
);
478 if (adapter
->flash
== NULL
) {
479 device_printf(dev
, "Mapping of Flash failed\n");
483 adapter
->osdep
.flash_bus_space_tag
=
484 rman_get_bustag(adapter
->flash
);
485 adapter
->osdep
.flash_bus_space_handle
=
486 rman_get_bushandle(adapter
->flash
);
489 * This is used in the shared code
490 * XXX this goof is actually not used.
492 adapter
->hw
.flash_address
= (uint8_t *)adapter
->flash
;
495 switch (adapter
->hw
.mac
.type
) {
501 * Pullup extra 4bytes into the first data segment for
503 * 82571/82572 specification update errata #7
505 * Same applies to I217 (and maybe I218 and I219).
508 * 4bytes instead of 2bytes, which are mentioned in the
509 * errata, are pulled; mainly to keep rest of the data
512 adapter
->flags
|= EM_FLAG_TSO_PULLEX
;
516 if (pci_is_pcie(dev
))
517 adapter
->flags
|= EM_FLAG_TSO
;
521 /* Do Shared Code initialization */
522 if (e1000_setup_init_funcs(&adapter
->hw
, TRUE
)) {
523 device_printf(dev
, "Setup of Shared code failed\n");
528 e1000_get_bus_info(&adapter
->hw
);
531 * Validate number of transmit and receive descriptors. It
532 * must not exceed hardware maximum, and must be multiple
533 * of E1000_DBA_ALIGN.
535 if ((em_txd
* sizeof(struct e1000_tx_desc
)) % EM_DBA_ALIGN
!= 0 ||
536 (adapter
->hw
.mac
.type
>= e1000_82544
&& em_txd
> EM_MAX_TXD
) ||
537 (adapter
->hw
.mac
.type
< e1000_82544
&& em_txd
> EM_MAX_TXD_82543
) ||
538 em_txd
< EM_MIN_TXD
) {
539 if (adapter
->hw
.mac
.type
< e1000_82544
)
540 adapter
->num_tx_desc
= EM_MAX_TXD_82543
;
542 adapter
->num_tx_desc
= EM_DEFAULT_TXD
;
543 device_printf(dev
, "Using %d TX descriptors instead of %d!\n",
544 adapter
->num_tx_desc
, em_txd
);
546 adapter
->num_tx_desc
= em_txd
;
548 if ((em_rxd
* sizeof(struct e1000_rx_desc
)) % EM_DBA_ALIGN
!= 0 ||
549 (adapter
->hw
.mac
.type
>= e1000_82544
&& em_rxd
> EM_MAX_RXD
) ||
550 (adapter
->hw
.mac
.type
< e1000_82544
&& em_rxd
> EM_MAX_RXD_82543
) ||
551 em_rxd
< EM_MIN_RXD
) {
552 if (adapter
->hw
.mac
.type
< e1000_82544
)
553 adapter
->num_rx_desc
= EM_MAX_RXD_82543
;
555 adapter
->num_rx_desc
= EM_DEFAULT_RXD
;
556 device_printf(dev
, "Using %d RX descriptors instead of %d!\n",
557 adapter
->num_rx_desc
, em_rxd
);
559 adapter
->num_rx_desc
= em_rxd
;
562 adapter
->hw
.mac
.autoneg
= DO_AUTO_NEG
;
563 adapter
->hw
.phy
.autoneg_wait_to_complete
= FALSE
;
564 adapter
->hw
.phy
.autoneg_advertised
= AUTONEG_ADV_DEFAULT
;
565 adapter
->rx_buffer_len
= MCLBYTES
;
568 * Interrupt throttle rate
570 if (em_int_throttle_ceil
== 0) {
571 adapter
->int_throttle_ceil
= 0;
573 int throttle
= em_int_throttle_ceil
;
576 throttle
= EM_DEFAULT_ITR
;
578 /* Recalculate the tunable value to get the exact frequency. */
579 throttle
= 1000000000 / 256 / throttle
;
581 /* Upper 16bits of ITR is reserved and should be zero */
582 if (throttle
& 0xffff0000)
583 throttle
= 1000000000 / 256 / EM_DEFAULT_ITR
;
585 adapter
->int_throttle_ceil
= 1000000000 / 256 / throttle
;
588 e1000_init_script_state_82541(&adapter
->hw
, TRUE
);
589 e1000_set_tbi_compatibility_82543(&adapter
->hw
, TRUE
);
592 if (adapter
->hw
.phy
.media_type
== e1000_media_type_copper
) {
593 adapter
->hw
.phy
.mdix
= AUTO_ALL_MODES
;
594 adapter
->hw
.phy
.disable_polarity_correction
= FALSE
;
595 adapter
->hw
.phy
.ms_type
= EM_MASTER_SLAVE
;
598 /* Set the frame limits assuming standard ethernet sized frames. */
599 adapter
->hw
.mac
.max_frame_size
=
600 ETHERMTU
+ ETHER_HDR_LEN
+ ETHER_CRC_LEN
;
601 adapter
->min_frame_size
= ETH_ZLEN
+ ETHER_CRC_LEN
;
603 /* This controls when hardware reports transmit completion status. */
604 adapter
->hw
.mac
.report_tx_early
= 1;
607 * Create top level busdma tag
609 error
= bus_dma_tag_create(NULL
, 1, 0,
610 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
,
612 BUS_SPACE_MAXSIZE_32BIT
, 0, BUS_SPACE_MAXSIZE_32BIT
,
613 0, &adapter
->parent_dtag
);
615 device_printf(dev
, "could not create top level DMA tag\n");
620 * Allocate Transmit Descriptor ring
622 tsize
= roundup2(adapter
->num_tx_desc
* sizeof(struct e1000_tx_desc
),
624 error
= em_dma_malloc(adapter
, tsize
, &adapter
->txdma
);
626 device_printf(dev
, "Unable to allocate tx_desc memory\n");
629 adapter
->tx_desc_base
= adapter
->txdma
.dma_vaddr
;
632 * Allocate Receive Descriptor ring
634 rsize
= roundup2(adapter
->num_rx_desc
* sizeof(struct e1000_rx_desc
),
636 error
= em_dma_malloc(adapter
, rsize
, &adapter
->rxdma
);
638 device_printf(dev
, "Unable to allocate rx_desc memory\n");
641 adapter
->rx_desc_base
= adapter
->rxdma
.dma_vaddr
;
643 /* Allocate multicast array memory. */
644 adapter
->mta
= kmalloc(ETH_ADDR_LEN
* MAX_NUM_MULTICAST_ADDRESSES
,
647 /* Indicate SOL/IDER usage */
648 if (e1000_check_reset_block(&adapter
->hw
)) {
650 "PHY reset is blocked due to SOL/IDER session.\n");
654 adapter
->hw
.dev_spec
.ich8lan
.eee_disable
= 1;
657 * Start from a known state, this is important in reading the
658 * nvm and mac from that.
660 e1000_reset_hw(&adapter
->hw
);
662 /* Make sure we have a good EEPROM before we read from it */
663 if (e1000_validate_nvm_checksum(&adapter
->hw
) < 0) {
665 * Some PCI-E parts fail the first check due to
666 * the link being in sleep state, call it again,
667 * if it fails a second time its a real issue.
669 if (e1000_validate_nvm_checksum(&adapter
->hw
) < 0) {
671 "The EEPROM Checksum Is Not Valid\n");
677 /* Copy the permanent MAC address out of the EEPROM */
678 if (e1000_read_mac_addr(&adapter
->hw
) < 0) {
679 device_printf(dev
, "EEPROM read error while reading MAC"
684 if (!em_is_valid_eaddr(adapter
->hw
.mac
.addr
)) {
685 device_printf(dev
, "Invalid MAC address\n");
690 /* Disable ULP support */
691 e1000_disable_ulp_lpt_lp(&adapter
->hw
, TRUE
);
693 /* Allocate transmit descriptors and buffers */
694 error
= em_create_tx_ring(adapter
);
696 device_printf(dev
, "Could not setup transmit structures\n");
700 /* Allocate receive descriptors and buffers */
701 error
= em_create_rx_ring(adapter
);
703 device_printf(dev
, "Could not setup receive structures\n");
707 /* Manually turn off all interrupts */
708 E1000_WRITE_REG(&adapter
->hw
, E1000_IMC
, 0xffffffff);
710 /* Determine if we have to control management hardware */
711 if (e1000_enable_mng_pass_thru(&adapter
->hw
))
712 adapter
->flags
|= EM_FLAG_HAS_MGMT
;
717 apme_mask
= EM_EEPROM_APME
;
719 switch (adapter
->hw
.mac
.type
) {
726 adapter
->flags
|= EM_FLAG_HAS_AMT
;
730 case e1000_82546_rev_3
:
733 case e1000_80003es2lan
:
734 if (adapter
->hw
.bus
.func
== 1) {
735 e1000_read_nvm(&adapter
->hw
,
736 NVM_INIT_CONTROL3_PORT_B
, 1, &eeprom_data
);
738 e1000_read_nvm(&adapter
->hw
,
739 NVM_INIT_CONTROL3_PORT_A
, 1, &eeprom_data
);
748 apme_mask
= E1000_WUC_APME
;
749 adapter
->flags
|= EM_FLAG_HAS_AMT
;
750 eeprom_data
= E1000_READ_REG(&adapter
->hw
, E1000_WUC
);
754 e1000_read_nvm(&adapter
->hw
,
755 NVM_INIT_CONTROL3_PORT_A
, 1, &eeprom_data
);
758 if (eeprom_data
& apme_mask
)
759 adapter
->wol
= E1000_WUFC_MAG
| E1000_WUFC_MC
;
762 * We have the eeprom settings, now apply the special cases
763 * where the eeprom may be wrong or the board won't support
764 * wake on lan on a particular port
766 device_id
= pci_get_device(dev
);
768 case E1000_DEV_ID_82546GB_PCIE
:
772 case E1000_DEV_ID_82546EB_FIBER
:
773 case E1000_DEV_ID_82546GB_FIBER
:
774 case E1000_DEV_ID_82571EB_FIBER
:
776 * Wake events only supported on port A for dual fiber
777 * regardless of eeprom setting
779 if (E1000_READ_REG(&adapter
->hw
, E1000_STATUS
) &
784 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3
:
785 case E1000_DEV_ID_82571EB_QUAD_COPPER
:
786 case E1000_DEV_ID_82571EB_QUAD_FIBER
:
787 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP
:
788 /* if quad port adapter, disable WoL on all but port A */
789 if (em_global_quad_port_a
!= 0)
791 /* Reset for multiple quad port adapters */
792 if (++em_global_quad_port_a
== 4)
793 em_global_quad_port_a
= 0;
797 /* XXX disable wol */
800 /* Setup flow control. */
801 device_getenv_string(dev
, "flow_ctrl", flowctrl
, sizeof(flowctrl
),
803 adapter
->ifm_flowctrl
= ifmedia_str2ethfc(flowctrl
);
804 if (adapter
->hw
.mac
.type
== e1000_pchlan
) {
805 /* Only PAUSE reception is supported on PCH */
806 adapter
->ifm_flowctrl
&= ~IFM_ETH_TXPAUSE
;
809 /* Setup OS specific network interface */
810 em_setup_ifp(adapter
);
812 /* Add sysctl tree, must after em_setup_ifp() */
813 em_add_sysctl(adapter
);
817 ifpoll_compat_setup(&adapter
->npoll
,
818 device_get_sysctl_ctx(dev
), device_get_sysctl_tree(dev
),
819 device_get_unit(dev
), ifp
->if_serializer
);
822 /* Reset the hardware */
823 error
= em_reset(adapter
);
826 * Some 82573 parts fail the first reset, call it again,
827 * if it fails a second time its a real issue.
829 error
= em_reset(adapter
);
831 device_printf(dev
, "Unable to reset the hardware\n");
837 /* Initialize statistics */
838 em_update_stats(adapter
);
840 adapter
->hw
.mac
.get_link_status
= 1;
841 em_update_link_status(adapter
);
843 /* Do we need workaround for 82544 PCI-X adapter? */
844 if (adapter
->hw
.bus
.type
== e1000_bus_type_pcix
&&
845 adapter
->hw
.mac
.type
== e1000_82544
)
846 adapter
->pcix_82544
= TRUE
;
848 adapter
->pcix_82544
= FALSE
;
850 if (adapter
->pcix_82544
) {
852 * 82544 on PCI-X may split one TX segment
853 * into two TX descs, so we double its number
854 * of spare TX desc here.
856 adapter
->spare_tx_desc
= 2 * EM_TX_SPARE
;
858 adapter
->spare_tx_desc
= EM_TX_SPARE
;
860 if (adapter
->flags
& EM_FLAG_TSO
)
861 adapter
->spare_tx_desc
= EM_TX_SPARE_TSO
;
862 adapter
->tx_wreg_nsegs
= EM_DEFAULT_TXWREG
;
865 * Keep following relationship between spare_tx_desc, oact_tx_desc
867 * (spare_tx_desc + EM_TX_RESERVED) <=
868 * oact_tx_desc <= EM_TX_OACTIVE_MAX <= tx_int_nsegs
870 adapter
->oact_tx_desc
= adapter
->num_tx_desc
/ 8;
871 if (adapter
->oact_tx_desc
> EM_TX_OACTIVE_MAX
)
872 adapter
->oact_tx_desc
= EM_TX_OACTIVE_MAX
;
873 if (adapter
->oact_tx_desc
< adapter
->spare_tx_desc
+ EM_TX_RESERVED
)
874 adapter
->oact_tx_desc
= adapter
->spare_tx_desc
+ EM_TX_RESERVED
;
876 adapter
->tx_int_nsegs
= adapter
->num_tx_desc
/ 16;
877 if (adapter
->tx_int_nsegs
< adapter
->oact_tx_desc
)
878 adapter
->tx_int_nsegs
= adapter
->oact_tx_desc
;
880 /* Non-AMT based hardware can now take control from firmware */
881 if ((adapter
->flags
& (EM_FLAG_HAS_MGMT
| EM_FLAG_HAS_AMT
)) ==
882 EM_FLAG_HAS_MGMT
&& adapter
->hw
.mac
.type
>= e1000_82571
)
883 em_get_hw_control(adapter
);
885 ifq_set_cpuid(&ifp
->if_snd
, rman_get_cpuid(adapter
->intr_res
));
888 * Missing Interrupt Following ICR read:
890 * 82571/82572 specification update errata #76
891 * 82573 specification update errata #31
892 * 82574 specification update errata #12
893 * 82583 specification update errata #4
896 if ((adapter
->flags
& EM_FLAG_SHARED_INTR
) &&
897 (adapter
->hw
.mac
.type
== e1000_82571
||
898 adapter
->hw
.mac
.type
== e1000_82572
||
899 adapter
->hw
.mac
.type
== e1000_82573
||
900 adapter
->hw
.mac
.type
== e1000_82574
||
901 adapter
->hw
.mac
.type
== e1000_82583
))
902 intr_func
= em_intr_mask
;
904 error
= bus_setup_intr(dev
, adapter
->intr_res
, INTR_MPSAFE
,
905 intr_func
, adapter
, &adapter
->intr_tag
,
908 device_printf(dev
, "Failed to register interrupt handler");
919 em_detach(device_t dev
)
921 struct adapter
*adapter
= device_get_softc(dev
);
923 if (device_is_attached(dev
)) {
924 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
926 lwkt_serialize_enter(ifp
->if_serializer
);
930 e1000_phy_hw_reset(&adapter
->hw
);
932 em_rel_mgmt(adapter
);
933 em_rel_hw_control(adapter
);
936 E1000_WRITE_REG(&adapter
->hw
, E1000_WUC
,
938 E1000_WRITE_REG(&adapter
->hw
, E1000_WUFC
, adapter
->wol
);
942 bus_teardown_intr(dev
, adapter
->intr_res
, adapter
->intr_tag
);
944 lwkt_serialize_exit(ifp
->if_serializer
);
947 } else if (adapter
->memory
!= NULL
) {
948 em_rel_hw_control(adapter
);
951 ifmedia_removeall(&adapter
->media
);
952 bus_generic_detach(dev
);
954 em_free_pci_res(adapter
);
956 em_destroy_tx_ring(adapter
, adapter
->num_tx_desc
);
957 em_destroy_rx_ring(adapter
, adapter
->num_rx_desc
);
959 /* Free Transmit Descriptor ring */
960 if (adapter
->tx_desc_base
)
961 em_dma_free(adapter
, &adapter
->txdma
);
963 /* Free Receive Descriptor ring */
964 if (adapter
->rx_desc_base
)
965 em_dma_free(adapter
, &adapter
->rxdma
);
967 /* Free top level busdma tag */
968 if (adapter
->parent_dtag
!= NULL
)
969 bus_dma_tag_destroy(adapter
->parent_dtag
);
971 if (adapter
->mta
!= NULL
)
972 kfree(adapter
->mta
, M_DEVBUF
);
978 em_shutdown(device_t dev
)
980 return em_suspend(dev
);
984 em_suspend(device_t dev
)
986 struct adapter
*adapter
= device_get_softc(dev
);
987 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
989 lwkt_serialize_enter(ifp
->if_serializer
);
993 em_rel_mgmt(adapter
);
994 em_rel_hw_control(adapter
);
997 E1000_WRITE_REG(&adapter
->hw
, E1000_WUC
, E1000_WUC_PME_EN
);
998 E1000_WRITE_REG(&adapter
->hw
, E1000_WUFC
, adapter
->wol
);
1002 lwkt_serialize_exit(ifp
->if_serializer
);
1004 return bus_generic_suspend(dev
);
1008 em_resume(device_t dev
)
1010 struct adapter
*adapter
= device_get_softc(dev
);
1011 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
1013 lwkt_serialize_enter(ifp
->if_serializer
);
1015 if (adapter
->hw
.mac
.type
== e1000_pch2lan
)
1016 e1000_resume_workarounds_pchlan(&adapter
->hw
);
1019 em_get_mgmt(adapter
);
1022 lwkt_serialize_exit(ifp
->if_serializer
);
1024 return bus_generic_resume(dev
);
1028 em_start(struct ifnet
*ifp
, struct ifaltq_subque
*ifsq
)
1030 struct adapter
*adapter
= ifp
->if_softc
;
1031 struct mbuf
*m_head
;
1032 int idx
= -1, nsegs
= 0;
1034 ASSERT_ALTQ_SQ_DEFAULT(ifp
, ifsq
);
1035 ASSERT_SERIALIZED(ifp
->if_serializer
);
1037 if ((ifp
->if_flags
& IFF_RUNNING
) == 0 || ifq_is_oactive(&ifp
->if_snd
))
1040 if (!adapter
->link_active
) {
1041 ifq_purge(&ifp
->if_snd
);
1045 while (!ifq_is_empty(&ifp
->if_snd
)) {
1046 /* Now do we at least have a minimal? */
1047 if (EM_IS_OACTIVE(adapter
)) {
1048 em_tx_collect(adapter
);
1049 if (EM_IS_OACTIVE(adapter
)) {
1050 ifq_set_oactive(&ifp
->if_snd
);
1051 adapter
->no_tx_desc_avail1
++;
1057 m_head
= ifq_dequeue(&ifp
->if_snd
);
1061 if (em_encap(adapter
, &m_head
, &nsegs
, &idx
)) {
1062 IFNET_STAT_INC(ifp
, oerrors
, 1);
1063 em_tx_collect(adapter
);
1068 * TX interrupt are aggressively aggregated, so increasing
1069 * opackets at TX interrupt time will make the opackets
1070 * statistics vastly inaccurate; we do the opackets increment
1073 IFNET_STAT_INC(ifp
, opackets
, 1);
1075 if (nsegs
>= adapter
->tx_wreg_nsegs
&& idx
>= 0) {
1076 E1000_WRITE_REG(&adapter
->hw
, E1000_TDT(0), idx
);
1081 /* Send a copy of the frame to the BPF listener */
1082 ETHER_BPF_MTAP(ifp
, m_head
);
1084 /* Set timeout in case hardware has problems transmitting. */
1085 ifp
->if_timer
= EM_TX_TIMEOUT
;
1088 E1000_WRITE_REG(&adapter
->hw
, E1000_TDT(0), idx
);
1092 em_ioctl(struct ifnet
*ifp
, u_long command
, caddr_t data
, struct ucred
*cr
)
1094 struct adapter
*adapter
= ifp
->if_softc
;
1095 struct ifreq
*ifr
= (struct ifreq
*)data
;
1096 uint16_t eeprom_data
= 0;
1097 int max_frame_size
, mask
, reinit
;
1100 ASSERT_SERIALIZED(ifp
->if_serializer
);
1104 switch (adapter
->hw
.mac
.type
) {
1107 * 82573 only supports jumbo frames
1108 * if ASPM is disabled.
1110 e1000_read_nvm(&adapter
->hw
,
1111 NVM_INIT_3GIO_3
, 1, &eeprom_data
);
1112 if (eeprom_data
& NVM_WORD1A_ASPM_MASK
) {
1113 max_frame_size
= ETHER_MAX_LEN
;
1118 /* Limit Jumbo Frame size */
1122 case e1000_ich10lan
:
1128 case e1000_80003es2lan
:
1129 max_frame_size
= 9234;
1133 max_frame_size
= 4096;
1136 /* Adapters that do not support jumbo frames */
1139 max_frame_size
= ETHER_MAX_LEN
;
1143 max_frame_size
= MAX_JUMBO_FRAME_SIZE
;
1146 if (ifr
->ifr_mtu
> max_frame_size
- ETHER_HDR_LEN
-
1152 ifp
->if_mtu
= ifr
->ifr_mtu
;
1153 adapter
->hw
.mac
.max_frame_size
=
1154 ifp
->if_mtu
+ ETHER_HDR_LEN
+ ETHER_CRC_LEN
;
1156 if (ifp
->if_flags
& IFF_RUNNING
)
1161 if (ifp
->if_flags
& IFF_UP
) {
1162 if ((ifp
->if_flags
& IFF_RUNNING
)) {
1163 if ((ifp
->if_flags
^ adapter
->if_flags
) &
1164 (IFF_PROMISC
| IFF_ALLMULTI
)) {
1165 em_disable_promisc(adapter
);
1166 em_set_promisc(adapter
);
1171 } else if (ifp
->if_flags
& IFF_RUNNING
) {
1174 adapter
->if_flags
= ifp
->if_flags
;
1179 if (ifp
->if_flags
& IFF_RUNNING
) {
1180 em_disable_intr(adapter
);
1181 em_set_multi(adapter
);
1182 if (adapter
->hw
.mac
.type
== e1000_82542
&&
1183 adapter
->hw
.revision_id
== E1000_REVISION_2
)
1184 em_init_rx_unit(adapter
);
1185 #ifdef IFPOLL_ENABLE
1186 if (!(ifp
->if_flags
& IFF_NPOLLING
))
1188 em_enable_intr(adapter
);
1193 /* Check SOL/IDER usage */
1194 if (e1000_check_reset_block(&adapter
->hw
)) {
1195 device_printf(adapter
->dev
, "Media change is"
1196 " blocked due to SOL/IDER session.\n");
1202 error
= ifmedia_ioctl(ifp
, ifr
, &adapter
->media
, command
);
1207 mask
= ifr
->ifr_reqcap
^ ifp
->if_capenable
;
1208 if (mask
& IFCAP_RXCSUM
) {
1209 ifp
->if_capenable
^= IFCAP_RXCSUM
;
1212 if (mask
& IFCAP_TXCSUM
) {
1213 ifp
->if_capenable
^= IFCAP_TXCSUM
;
1214 if (ifp
->if_capenable
& IFCAP_TXCSUM
)
1215 ifp
->if_hwassist
|= EM_CSUM_FEATURES
;
1217 ifp
->if_hwassist
&= ~EM_CSUM_FEATURES
;
1219 if (mask
& IFCAP_TSO
) {
1220 ifp
->if_capenable
^= IFCAP_TSO
;
1221 if (ifp
->if_capenable
& IFCAP_TSO
)
1222 ifp
->if_hwassist
|= CSUM_TSO
;
1224 ifp
->if_hwassist
&= ~CSUM_TSO
;
1226 if (mask
& IFCAP_VLAN_HWTAGGING
) {
1227 ifp
->if_capenable
^= IFCAP_VLAN_HWTAGGING
;
1230 if (reinit
&& (ifp
->if_flags
& IFF_RUNNING
))
1235 error
= ether_ioctl(ifp
, command
, data
);
1242 em_watchdog(struct ifnet
*ifp
)
1244 struct adapter
*adapter
= ifp
->if_softc
;
1246 ASSERT_SERIALIZED(ifp
->if_serializer
);
1249 * The timer is set to 5 every time start queues a packet.
1250 * Then txeof keeps resetting it as long as it cleans at
1251 * least one descriptor.
1252 * Finally, anytime all descriptors are clean the timer is
1256 if (E1000_READ_REG(&adapter
->hw
, E1000_TDT(0)) ==
1257 E1000_READ_REG(&adapter
->hw
, E1000_TDH(0))) {
1259 * If we reach here, all TX jobs are completed and
1260 * the TX engine should have been idled for some time.
1261 * We don't need to call if_devstart() here.
1263 ifq_clr_oactive(&ifp
->if_snd
);
1269 * If we are in this routine because of pause frames, then
1270 * don't reset the hardware.
1272 if (E1000_READ_REG(&adapter
->hw
, E1000_STATUS
) &
1273 E1000_STATUS_TXOFF
) {
1274 ifp
->if_timer
= EM_TX_TIMEOUT
;
1278 if (e1000_check_for_link(&adapter
->hw
) == 0)
1279 if_printf(ifp
, "watchdog timeout -- resetting\n");
1281 IFNET_STAT_INC(ifp
, oerrors
, 1);
1282 adapter
->watchdog_events
++;
1286 if (!ifq_is_empty(&ifp
->if_snd
))
1293 struct adapter
*adapter
= xsc
;
1294 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
1295 device_t dev
= adapter
->dev
;
1297 ASSERT_SERIALIZED(ifp
->if_serializer
);
1301 /* Get the latest mac address, User can use a LAA */
1302 bcopy(IF_LLADDR(ifp
), adapter
->hw
.mac
.addr
, ETHER_ADDR_LEN
);
1304 /* Put the address into the Receive Address Array */
1305 e1000_rar_set(&adapter
->hw
, adapter
->hw
.mac
.addr
, 0);
1308 * With the 82571 adapter, RAR[0] may be overwritten
1309 * when the other port is reset, we make a duplicate
1310 * in RAR[14] for that eventuality, this assures
1311 * the interface continues to function.
1313 if (adapter
->hw
.mac
.type
== e1000_82571
) {
1314 e1000_set_laa_state_82571(&adapter
->hw
, TRUE
);
1315 e1000_rar_set(&adapter
->hw
, adapter
->hw
.mac
.addr
,
1316 E1000_RAR_ENTRIES
- 1);
1319 /* Reset the hardware */
1320 if (em_reset(adapter
)) {
1321 device_printf(dev
, "Unable to reset the hardware\n");
1322 /* XXX em_stop()? */
1325 em_update_link_status(adapter
);
1327 /* Setup VLAN support, basic and offload if available */
1328 E1000_WRITE_REG(&adapter
->hw
, E1000_VET
, ETHERTYPE_VLAN
);
1330 if (ifp
->if_capenable
& IFCAP_VLAN_HWTAGGING
) {
1333 ctrl
= E1000_READ_REG(&adapter
->hw
, E1000_CTRL
);
1334 ctrl
|= E1000_CTRL_VME
;
1335 E1000_WRITE_REG(&adapter
->hw
, E1000_CTRL
, ctrl
);
1338 /* Configure for OS presence */
1339 em_get_mgmt(adapter
);
1341 /* Prepare transmit descriptors and buffers */
1342 em_init_tx_ring(adapter
);
1343 em_init_tx_unit(adapter
);
1345 /* Setup Multicast table */
1346 em_set_multi(adapter
);
1348 /* Prepare receive descriptors and buffers */
1349 if (em_init_rx_ring(adapter
)) {
1350 device_printf(dev
, "Could not setup receive structures\n");
1354 em_init_rx_unit(adapter
);
1356 /* Don't lose promiscuous settings */
1357 em_set_promisc(adapter
);
1359 ifp
->if_flags
|= IFF_RUNNING
;
1360 ifq_clr_oactive(&ifp
->if_snd
);
1362 callout_reset(&adapter
->timer
, hz
, em_timer
, adapter
);
1363 e1000_clear_hw_cntrs_base_generic(&adapter
->hw
);
1365 /* MSI/X configuration for 82574 */
1366 if (adapter
->hw
.mac
.type
== e1000_82574
) {
1369 tmp
= E1000_READ_REG(&adapter
->hw
, E1000_CTRL_EXT
);
1370 tmp
|= E1000_CTRL_EXT_PBA_CLR
;
1371 E1000_WRITE_REG(&adapter
->hw
, E1000_CTRL_EXT
, tmp
);
1374 * Set the IVAR - interrupt vector routing.
1375 * Each nibble represents a vector, high bit
1376 * is enable, other 3 bits are the MSIX table
1377 * entry, we map RXQ0 to 0, TXQ0 to 1, and
1378 * Link (other) to 2, hence the magic number.
1380 E1000_WRITE_REG(&adapter
->hw
, E1000_IVAR
, 0x800A0908);
1383 #ifdef IFPOLL_ENABLE
1385 * Only enable interrupts if we are not polling, make sure
1386 * they are off otherwise.
1388 if (ifp
->if_flags
& IFF_NPOLLING
)
1389 em_disable_intr(adapter
);
1391 #endif /* IFPOLL_ENABLE */
1392 em_enable_intr(adapter
);
1394 /* AMT based hardware can now take control from firmware */
1395 if ((adapter
->flags
& (EM_FLAG_HAS_MGMT
| EM_FLAG_HAS_AMT
)) ==
1396 (EM_FLAG_HAS_MGMT
| EM_FLAG_HAS_AMT
) &&
1397 adapter
->hw
.mac
.type
>= e1000_82571
)
1398 em_get_hw_control(adapter
);
1401 #ifdef IFPOLL_ENABLE
1404 em_npoll_compat(struct ifnet
*ifp
, void *arg __unused
, int count
)
1406 struct adapter
*adapter
= ifp
->if_softc
;
1408 ASSERT_SERIALIZED(ifp
->if_serializer
);
1410 if (adapter
->npoll
.ifpc_stcount
-- == 0) {
1413 adapter
->npoll
.ifpc_stcount
= adapter
->npoll
.ifpc_stfrac
;
1415 reg_icr
= E1000_READ_REG(&adapter
->hw
, E1000_ICR
);
1416 if (reg_icr
& (E1000_ICR_RXSEQ
| E1000_ICR_LSC
)) {
1417 callout_stop(&adapter
->timer
);
1418 adapter
->hw
.mac
.get_link_status
= 1;
1419 em_update_link_status(adapter
);
1420 callout_reset(&adapter
->timer
, hz
, em_timer
, adapter
);
1424 em_rxeof(adapter
, count
);
1427 if (!ifq_is_empty(&ifp
->if_snd
))
/*
 * ifpoll(4) registration hook.
 *
 * info != NULL: register the RX poll handler on the configured polling
 * CPU and switch the device out of interrupt mode.
 * info == NULL: deregister and revert to interrupt-driven operation.
 */
static void
em_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct adapter *adapter = ifp->if_softc;

	/* Caller must hold the interface serializer. */
	ASSERT_SERIALIZED(ifp->if_serializer);

	if (info != NULL) {
		int cpuid = adapter->npoll.ifpc_cpuid;

		info->ifpi_rx[cpuid].poll_func = em_npoll_compat;
		info->ifpi_rx[cpuid].arg = NULL;
		info->ifpi_rx[cpuid].serializer = ifp->if_serializer;

		/* Interrupts are not used while polling */
		if (ifp->if_flags & IFF_RUNNING)
			em_disable_intr(adapter);
		/* TX is dispatched on the polling CPU */
		ifq_set_cpuid(&ifp->if_snd, cpuid);
	} else {
		if (ifp->if_flags & IFF_RUNNING)
			em_enable_intr(adapter);
		/* Route TX back to the CPU handling the interrupt */
		ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(adapter->intr_res));
	}
}
1455 #endif /* IFPOLL_ENABLE */
1460 em_intr_body(xsc
, TRUE
);
1464 em_intr_body(struct adapter
*adapter
, boolean_t chk_asserted
)
1466 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
1470 ASSERT_SERIALIZED(ifp
->if_serializer
);
1472 reg_icr
= E1000_READ_REG(&adapter
->hw
, E1000_ICR
);
1475 ((adapter
->hw
.mac
.type
>= e1000_82571
&&
1476 (reg_icr
& E1000_ICR_INT_ASSERTED
) == 0) ||
1483 * XXX: some laptops trigger several spurious interrupts
1484 * on em(4) when in the resume cycle. The ICR register
1485 * reports all-ones value in this case. Processing such
1486 * interrupts would lead to a freeze. I don't know why.
1488 if (reg_icr
== 0xffffffff) {
1493 if (ifp
->if_flags
& IFF_RUNNING
) {
1495 (E1000_ICR_RXT0
| E1000_ICR_RXDMT0
| E1000_ICR_RXO
))
1496 em_rxeof(adapter
, -1);
1497 if (reg_icr
& E1000_ICR_TXDW
) {
1499 if (!ifq_is_empty(&ifp
->if_snd
))
1504 /* Link status change */
1505 if (reg_icr
& (E1000_ICR_RXSEQ
| E1000_ICR_LSC
)) {
1506 callout_stop(&adapter
->timer
);
1507 adapter
->hw
.mac
.get_link_status
= 1;
1508 em_update_link_status(adapter
);
1510 /* Deal with TX cruft when link lost */
1511 em_tx_purge(adapter
);
1513 callout_reset(&adapter
->timer
, hz
, em_timer
, adapter
);
1516 if (reg_icr
& E1000_ICR_RXO
)
1517 adapter
->rx_overruns
++;
/*
 * Interrupt handler variant used when the interrupt line is shared:
 * mask all interrupts (IMC) for the duration of processing, then
 * re-enable them through IMS.
 */
static void
em_intr_mask(void *xsc)
{
	struct adapter *adapter = xsc;

	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
	/*
	 * NOTE:
	 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
	 * so don't check it.
	 */
	em_intr_body(adapter, FALSE);
	E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK);
}
1538 em_media_status(struct ifnet
*ifp
, struct ifmediareq
*ifmr
)
1540 struct adapter
*adapter
= ifp
->if_softc
;
1542 ASSERT_SERIALIZED(ifp
->if_serializer
);
1544 em_update_link_status(adapter
);
1546 ifmr
->ifm_status
= IFM_AVALID
;
1547 ifmr
->ifm_active
= IFM_ETHER
;
1549 if (!adapter
->link_active
) {
1550 if (adapter
->hw
.mac
.autoneg
)
1551 ifmr
->ifm_active
|= IFM_NONE
;
1553 ifmr
->ifm_active
= adapter
->media
.ifm_media
;
1557 ifmr
->ifm_status
|= IFM_ACTIVE
;
1558 if (adapter
->ifm_flowctrl
& IFM_ETH_FORCEPAUSE
)
1559 ifmr
->ifm_active
|= adapter
->ifm_flowctrl
;
1561 if (adapter
->hw
.phy
.media_type
== e1000_media_type_fiber
||
1562 adapter
->hw
.phy
.media_type
== e1000_media_type_internal_serdes
) {
1563 u_char fiber_type
= IFM_1000_SX
;
1565 if (adapter
->hw
.mac
.type
== e1000_82545
)
1566 fiber_type
= IFM_1000_LX
;
1567 ifmr
->ifm_active
|= fiber_type
| IFM_FDX
;
1569 switch (adapter
->link_speed
) {
1571 ifmr
->ifm_active
|= IFM_10_T
;
1574 ifmr
->ifm_active
|= IFM_100_TX
;
1578 ifmr
->ifm_active
|= IFM_1000_T
;
1581 if (adapter
->link_duplex
== FULL_DUPLEX
)
1582 ifmr
->ifm_active
|= IFM_FDX
;
1584 ifmr
->ifm_active
|= IFM_HDX
;
1586 if (ifmr
->ifm_active
& IFM_FDX
) {
1588 e1000_fc2ifmedia(adapter
->hw
.fc
.current_mode
);
1593 em_media_change(struct ifnet
*ifp
)
1595 struct adapter
*adapter
= ifp
->if_softc
;
1596 struct ifmedia
*ifm
= &adapter
->media
;
1598 ASSERT_SERIALIZED(ifp
->if_serializer
);
1600 if (IFM_TYPE(ifm
->ifm_media
) != IFM_ETHER
)
1603 if (adapter
->hw
.mac
.type
== e1000_pchlan
&&
1604 (IFM_OPTIONS(ifm
->ifm_media
) & IFM_ETH_TXPAUSE
)) {
1606 if_printf(ifp
, "TX PAUSE is not supported on PCH\n");
1610 switch (IFM_SUBTYPE(ifm
->ifm_media
)) {
1612 adapter
->hw
.mac
.autoneg
= DO_AUTO_NEG
;
1613 adapter
->hw
.phy
.autoneg_advertised
= AUTONEG_ADV_DEFAULT
;
1619 adapter
->hw
.mac
.autoneg
= DO_AUTO_NEG
;
1620 adapter
->hw
.phy
.autoneg_advertised
= ADVERTISE_1000_FULL
;
1624 if (IFM_OPTIONS(ifm
->ifm_media
) & IFM_FDX
) {
1625 adapter
->hw
.mac
.forced_speed_duplex
= ADVERTISE_100_FULL
;
1627 if (IFM_OPTIONS(ifm
->ifm_media
) &
1628 (IFM_ETH_RXPAUSE
| IFM_ETH_TXPAUSE
)) {
1630 if_printf(ifp
, "Flow control is not "
1631 "allowed for half-duplex\n");
1635 adapter
->hw
.mac
.forced_speed_duplex
= ADVERTISE_100_HALF
;
1637 adapter
->hw
.mac
.autoneg
= FALSE
;
1638 adapter
->hw
.phy
.autoneg_advertised
= 0;
1642 if (IFM_OPTIONS(ifm
->ifm_media
) & IFM_FDX
) {
1643 adapter
->hw
.mac
.forced_speed_duplex
= ADVERTISE_10_FULL
;
1645 if (IFM_OPTIONS(ifm
->ifm_media
) &
1646 (IFM_ETH_RXPAUSE
| IFM_ETH_TXPAUSE
)) {
1648 if_printf(ifp
, "Flow control is not "
1649 "allowed for half-duplex\n");
1653 adapter
->hw
.mac
.forced_speed_duplex
= ADVERTISE_10_HALF
;
1655 adapter
->hw
.mac
.autoneg
= FALSE
;
1656 adapter
->hw
.phy
.autoneg_advertised
= 0;
1661 if_printf(ifp
, "Unsupported media type %d\n",
1662 IFM_SUBTYPE(ifm
->ifm_media
));
1666 adapter
->ifm_flowctrl
= ifm
->ifm_media
& IFM_ETH_FCMASK
;
1668 if (ifp
->if_flags
& IFF_RUNNING
)
1675 em_encap(struct adapter
*adapter
, struct mbuf
**m_headp
,
1676 int *segs_used
, int *idx
)
1678 bus_dma_segment_t segs
[EM_MAX_SCATTER
];
1680 struct em_buffer
*tx_buffer
, *tx_buffer_mapped
;
1681 struct e1000_tx_desc
*ctxd
= NULL
;
1682 struct mbuf
*m_head
= *m_headp
;
1683 uint32_t txd_upper
, txd_lower
, txd_used
, cmd
= 0;
1684 int maxsegs
, nsegs
, i
, j
, first
, last
= 0, error
;
1686 if (m_head
->m_pkthdr
.csum_flags
& CSUM_TSO
) {
1687 error
= em_tso_pullup(adapter
, m_headp
);
1693 txd_upper
= txd_lower
= 0;
1697 * Capture the first descriptor index, this descriptor
1698 * will have the index of the EOP which is the only one
1699 * that now gets a DONE bit writeback.
1701 first
= adapter
->next_avail_tx_desc
;
1702 tx_buffer
= &adapter
->tx_buffer_area
[first
];
1703 tx_buffer_mapped
= tx_buffer
;
1704 map
= tx_buffer
->map
;
1706 maxsegs
= adapter
->num_tx_desc_avail
- EM_TX_RESERVED
;
1707 KASSERT(maxsegs
>= adapter
->spare_tx_desc
,
1708 ("not enough spare TX desc"));
1709 if (adapter
->pcix_82544
) {
1710 /* Half it; see the comment in em_attach() */
1713 if (maxsegs
> EM_MAX_SCATTER
)
1714 maxsegs
= EM_MAX_SCATTER
;
1716 error
= bus_dmamap_load_mbuf_defrag(adapter
->txtag
, map
, m_headp
,
1717 segs
, maxsegs
, &nsegs
, BUS_DMA_NOWAIT
);
1719 if (error
== ENOBUFS
)
1720 adapter
->mbuf_alloc_failed
++;
1722 adapter
->no_tx_dma_setup
++;
1728 bus_dmamap_sync(adapter
->txtag
, map
, BUS_DMASYNC_PREWRITE
);
1731 adapter
->tx_nsegs
+= nsegs
;
1732 *segs_used
+= nsegs
;
1734 if (m_head
->m_pkthdr
.csum_flags
& CSUM_TSO
) {
1735 /* TSO will consume one TX desc */
1736 i
= em_tso_setup(adapter
, m_head
, &txd_upper
, &txd_lower
);
1737 adapter
->tx_nsegs
+= i
;
1739 } else if (m_head
->m_pkthdr
.csum_flags
& EM_CSUM_FEATURES
) {
1740 /* TX csum offloading will consume one TX desc */
1741 i
= em_txcsum(adapter
, m_head
, &txd_upper
, &txd_lower
);
1742 adapter
->tx_nsegs
+= i
;
1746 /* Handle VLAN tag */
1747 if (m_head
->m_flags
& M_VLANTAG
) {
1748 /* Set the vlan id. */
1749 txd_upper
|= (htole16(m_head
->m_pkthdr
.ether_vlantag
) << 16);
1750 /* Tell hardware to add tag */
1751 txd_lower
|= htole32(E1000_TXD_CMD_VLE
);
1754 i
= adapter
->next_avail_tx_desc
;
1756 /* Set up our transmit descriptors */
1757 for (j
= 0; j
< nsegs
; j
++) {
1758 /* If adapter is 82544 and on PCIX bus */
1759 if(adapter
->pcix_82544
) {
1760 DESC_ARRAY desc_array
;
1761 uint32_t array_elements
, counter
;
1764 * Check the Address and Length combination and
1765 * split the data accordingly
1767 array_elements
= em_82544_fill_desc(segs
[j
].ds_addr
,
1768 segs
[j
].ds_len
, &desc_array
);
1769 for (counter
= 0; counter
< array_elements
; counter
++) {
1770 KKASSERT(txd_used
< adapter
->num_tx_desc_avail
);
1772 tx_buffer
= &adapter
->tx_buffer_area
[i
];
1773 ctxd
= &adapter
->tx_desc_base
[i
];
1775 ctxd
->buffer_addr
= htole64(
1776 desc_array
.descriptor
[counter
].address
);
1777 ctxd
->lower
.data
= htole32(
1778 E1000_TXD_CMD_IFCS
| txd_lower
|
1779 desc_array
.descriptor
[counter
].length
);
1780 ctxd
->upper
.data
= htole32(txd_upper
);
1783 if (++i
== adapter
->num_tx_desc
)
1789 tx_buffer
= &adapter
->tx_buffer_area
[i
];
1790 ctxd
= &adapter
->tx_desc_base
[i
];
1792 ctxd
->buffer_addr
= htole64(segs
[j
].ds_addr
);
1793 ctxd
->lower
.data
= htole32(E1000_TXD_CMD_IFCS
|
1794 txd_lower
| segs
[j
].ds_len
);
1795 ctxd
->upper
.data
= htole32(txd_upper
);
1798 if (++i
== adapter
->num_tx_desc
)
1803 adapter
->next_avail_tx_desc
= i
;
1804 if (adapter
->pcix_82544
) {
1805 KKASSERT(adapter
->num_tx_desc_avail
> txd_used
);
1806 adapter
->num_tx_desc_avail
-= txd_used
;
1808 KKASSERT(adapter
->num_tx_desc_avail
> nsegs
);
1809 adapter
->num_tx_desc_avail
-= nsegs
;
1812 tx_buffer
->m_head
= m_head
;
1813 tx_buffer_mapped
->map
= tx_buffer
->map
;
1814 tx_buffer
->map
= map
;
1816 if (adapter
->tx_nsegs
>= adapter
->tx_int_nsegs
) {
1817 adapter
->tx_nsegs
= 0;
1820 * Report Status (RS) is turned on
1821 * every tx_int_nsegs descriptors.
1823 cmd
= E1000_TXD_CMD_RS
;
1826 * Keep track of the descriptor, which will
1827 * be written back by hardware.
1829 adapter
->tx_dd
[adapter
->tx_dd_tail
] = last
;
1830 EM_INC_TXDD_IDX(adapter
->tx_dd_tail
);
1831 KKASSERT(adapter
->tx_dd_tail
!= adapter
->tx_dd_head
);
1835 * Last Descriptor of Packet needs End Of Packet (EOP)
1837 ctxd
->lower
.data
|= htole32(E1000_TXD_CMD_EOP
| cmd
);
1839 if (adapter
->hw
.mac
.type
== e1000_82547
) {
1841 * Advance the Transmit Descriptor Tail (TDT), this tells the
1842 * E1000 that this frame is available to transmit.
1844 if (adapter
->link_duplex
== HALF_DUPLEX
) {
1845 em_82547_move_tail_serialized(adapter
);
1847 E1000_WRITE_REG(&adapter
->hw
, E1000_TDT(0), i
);
1848 em_82547_update_fifo_head(adapter
,
1849 m_head
->m_pkthdr
.len
);
1853 * Defer TDT updating, until enough descriptors are setup
/*
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
 * in this case. We do that only when FIFO is quiescent.
 */
static void
em_82547_move_tail_serialized(struct adapter *adapter)
{
	struct e1000_tx_desc *tx_desc;
	uint16_t hw_tdt, sw_tdt, length = 0;
	boolean_t eop = 0;

	/* Caller must hold the interface serializer. */
	ASSERT_SERIALIZED(adapter->arpcom.ac_if.if_serializer);

	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
	sw_tdt = adapter->next_avail_tx_desc;

	/*
	 * Walk the descriptors between the hardware tail and the software
	 * tail, advancing the hardware tail one packet (EOP boundary) at
	 * a time so each packet's FIFO usage can be checked before it is
	 * handed to the hardware.
	 */
	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			if (em_82547_fifo_workaround(adapter, length)) {
				/*
				 * Packet would wrap the internal FIFO;
				 * retry from the callout once the FIFO
				 * has drained.
				 */
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
				    em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
}
/*
 * Callout wrapper: take the interface serializer and run the
 * serialized 82547 tail-move routine.
 */
static void
em_82547_move_tail(void *xsc)
{
	struct adapter *adapter = xsc;
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	em_82547_move_tail_serialized(adapter);
	lwkt_serialize_exit(ifp->if_serializer);
}
/*
 * Decide whether a packet of 'len' bytes could span the internal TX
 * FIFO boundary on 82547 in half-duplex.  Returns 0 when the packet
 * may be queued (possibly after a successful FIFO reset), non-zero
 * when the caller must defer and retry later.
 */
static int
em_82547_fifo_workaround(struct adapter *adapter, int len)
{
	int fifo_space, fifo_pkt_len;

	/* Account for the FIFO header, rounded to FIFO granularity */
	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);

	if (adapter->link_duplex == HALF_DUPLEX) {
		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;

		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
			/* Reset only succeeds when the FIFO is quiescent */
			if (em_82547_tx_fifo_reset(adapter))
				return 0;
			else
				return 1;
		}
	}
	return 0;
}
/*
 * Advance the software copy of the 82547 TX FIFO head by the rounded
 * packet length, wrapping at the FIFO size.
 */
static void
em_82547_update_fifo_head(struct adapter *adapter, int len)
{
	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);

	/* tx_fifo_head is always 16 byte aligned */
	adapter->tx_fifo_head += fifo_pkt_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
}
/*
 * Reset the 82547 internal TX FIFO pointers, but only when the TX
 * path is completely idle: descriptor ring head == tail, FIFO
 * head == tail, FIFO saved head == saved tail, and no packets counted
 * in the FIFO.  Returns TRUE when the reset was performed, FALSE when
 * the FIFO was still busy.
 */
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
	     E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
	     E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
	     E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
		    tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
		    adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return TRUE;
	} else {
		return FALSE;
	}
}
/*
 * Program the RCTL promiscuous / all-multicast bits from the current
 * interface flags (IFF_PROMISC / IFF_ALLMULTI).  Only writes RCTL
 * when one of the two modes is requested.
 */
static void
em_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (em_debug_sbp)
			reg_rctl |= E1000_RCTL_SBP;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	}
}
/*
 * Clear unicast-promiscuous, multicast-promiscuous and
 * store-bad-packets bits in RCTL.
 */
static void
em_disable_promisc(struct adapter *adapter)
{
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);

	reg_rctl &= ~E1000_RCTL_UPE;
	reg_rctl &= ~E1000_RCTL_MPE;
	reg_rctl &= ~E1000_RCTL_SBP;
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
}
/*
 * Rebuild the hardware multicast filter from the interface's
 * multicast address list.  Falls back to all-multicast (MPE) when
 * there are more addresses than hardware filter slots.
 */
static void
em_set_multi(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;		/* scratch table for the shared code */
	int mcnt = 0;

	/* NOTE(review): mta buffer assumed to come from adapter->mta — confirm */
	mta = adapter->mta;
	bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	/*
	 * 82542 rev2 requires an RCTL.RST sequence (with MWI disabled)
	 * around multicast table updates.
	 */
	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
	}

	/* Collect up to MAX_NUM_MULTICAST_ADDRESSES link-level addresses */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		/* Too many entries for the filter: accept all multicast */
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
	}

	/* Undo the 82542 rev2 reset sequence */
	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_set_mwi(&adapter->hw);
	}
}
/*
 * This routine checks for link status and updates statistics.
 * Runs once per second from a callout, self-rearming.
 */
static void
em_timer(void *xsc)
{
	struct adapter *adapter = xsc;
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	em_update_link_status(adapter);
	em_update_stats(adapter);

	/* Reset LAA into RAR[0] on 82571 */
	if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
		em_print_hw_stats(adapter);

	em_smartspeed(adapter);

	/* Re-arm for the next second */
	callout_reset(&adapter->timer, hz, em_timer, adapter);

	lwkt_serialize_exit(ifp->if_serializer);
}
2097 em_update_link_status(struct adapter
*adapter
)
2099 struct e1000_hw
*hw
= &adapter
->hw
;
2100 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
2101 device_t dev
= adapter
->dev
;
2102 uint32_t link_check
= 0;
2104 /* Get the cached link value or read phy for real */
2105 switch (hw
->phy
.media_type
) {
2106 case e1000_media_type_copper
:
2107 if (hw
->mac
.get_link_status
) {
2108 /* Do the work to read phy */
2109 e1000_check_for_link(hw
);
2110 link_check
= !hw
->mac
.get_link_status
;
2111 if (link_check
) /* ESB2 fix */
2112 e1000_cfg_on_link_up(hw
);
2118 case e1000_media_type_fiber
:
2119 e1000_check_for_link(hw
);
2121 E1000_READ_REG(hw
, E1000_STATUS
) & E1000_STATUS_LU
;
2124 case e1000_media_type_internal_serdes
:
2125 e1000_check_for_link(hw
);
2126 link_check
= adapter
->hw
.mac
.serdes_has_link
;
2129 case e1000_media_type_unknown
:
2134 /* Now check for a transition */
2135 if (link_check
&& adapter
->link_active
== 0) {
2136 e1000_get_speed_and_duplex(hw
, &adapter
->link_speed
,
2137 &adapter
->link_duplex
);
2140 * Check if we should enable/disable SPEED_MODE bit on
2143 if (adapter
->link_speed
!= SPEED_1000
&&
2144 (hw
->mac
.type
== e1000_82571
||
2145 hw
->mac
.type
== e1000_82572
)) {
2148 tarc0
= E1000_READ_REG(hw
, E1000_TARC(0));
2149 tarc0
&= ~SPEED_MODE_BIT
;
2150 E1000_WRITE_REG(hw
, E1000_TARC(0), tarc0
);
2153 char flowctrl
[IFM_ETH_FC_STRLEN
];
2155 e1000_fc2str(hw
->fc
.current_mode
, flowctrl
,
2157 device_printf(dev
, "Link is up %d Mbps %s, "
2158 "Flow control: %s\n",
2159 adapter
->link_speed
,
2160 (adapter
->link_duplex
== FULL_DUPLEX
) ?
2161 "Full Duplex" : "Half Duplex",
2164 if (adapter
->ifm_flowctrl
& IFM_ETH_FORCEPAUSE
)
2165 e1000_force_flowctrl(hw
, adapter
->ifm_flowctrl
);
2166 adapter
->link_active
= 1;
2167 adapter
->smartspeed
= 0;
2168 ifp
->if_baudrate
= adapter
->link_speed
* 1000000;
2169 ifp
->if_link_state
= LINK_STATE_UP
;
2170 if_link_state_change(ifp
);
2171 } else if (!link_check
&& adapter
->link_active
== 1) {
2172 ifp
->if_baudrate
= adapter
->link_speed
= 0;
2173 adapter
->link_duplex
= 0;
2175 device_printf(dev
, "Link is Down\n");
2176 adapter
->link_active
= 0;
2178 /* Link down, disable watchdog */
2181 ifp
->if_link_state
= LINK_STATE_DOWN
;
2182 if_link_state_change(ifp
);
/*
 * Stop the interface: disable interrupts, halt the timers, reset the
 * hardware, and release all mbufs still held by the TX/RX rings.
 * Leaves the interface marked down (!IFF_RUNNING).
 */
static void
em_stop(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	int i;

	/* Caller must hold the interface serializer. */
	ASSERT_SERIALIZED(ifp->if_serializer);

	em_disable_intr(adapter);

	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	e1000_reset_hw(&adapter->hw);
	if (adapter->hw.mac.type >= e1000_82544)
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);

	/* Free any mbufs still attached to TX descriptors */
	for (i = 0; i < adapter->num_tx_desc; i++) {
		struct em_buffer *tx_buffer = &adapter->tx_buffer_area[i];

		if (tx_buffer->m_head != NULL) {
			bus_dmamap_unload(adapter->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}
	}

	/* Free any mbufs still attached to RX descriptors */
	for (i = 0; i < adapter->num_rx_desc; i++) {
		struct em_buffer *rx_buffer = &adapter->rx_buffer_area[i];

		if (rx_buffer->m_head != NULL) {
			bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
			m_freem(rx_buffer->m_head);
			rx_buffer->m_head = NULL;
		}
	}

	/* Drop any partially reassembled RX frame */
	if (adapter->fmp != NULL)
		m_freem(adapter->fmp);
	adapter->fmp = NULL;
	adapter->lmp = NULL;

	/* Invalidate the cached TX checksum/TSO offload context */
	adapter->csum_flags = 0;
	adapter->csum_lhlen = 0;
	adapter->csum_iphlen = 0;
	adapter->csum_thlen = 0;
	adapter->csum_mss = 0;
	adapter->csum_pktlen = 0;

	/* Reset TX done-tracking state */
	adapter->tx_dd_head = 0;
	adapter->tx_dd_tail = 0;
	adapter->tx_nsegs = 0;
}
/*
 * Read the PCI identification registers into the shared-code hw
 * structure and determine the MAC type.  Returns 0 on success, ENXIO
 * when the device is not a MAC type the shared code recognizes.
 */
static int
em_get_hw_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_get_revid(dev);
	adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
	adapter->hw.subsystem_device_id = pci_get_subdevice(dev);

	/* Do Shared Code Init and Setup */
	if (e1000_set_mac_type(&adapter->hw))
		return ENXIO;
	return 0;
}
2263 em_alloc_pci_res(struct adapter
*adapter
)
2265 device_t dev
= adapter
->dev
;
2267 int val
, rid
, msi_enable
, cap
;
2269 /* Enable bus mastering */
2270 pci_enable_busmaster(dev
);
2272 adapter
->memory_rid
= EM_BAR_MEM
;
2273 adapter
->memory
= bus_alloc_resource_any(dev
, SYS_RES_MEMORY
,
2274 &adapter
->memory_rid
, RF_ACTIVE
);
2275 if (adapter
->memory
== NULL
) {
2276 device_printf(dev
, "Unable to allocate bus resource: memory\n");
2279 adapter
->osdep
.mem_bus_space_tag
=
2280 rman_get_bustag(adapter
->memory
);
2281 adapter
->osdep
.mem_bus_space_handle
=
2282 rman_get_bushandle(adapter
->memory
);
2284 /* XXX This is quite goofy, it is not actually used */
2285 adapter
->hw
.hw_addr
= (uint8_t *)&adapter
->osdep
.mem_bus_space_handle
;
2287 /* Only older adapters use IO mapping */
2288 if (adapter
->hw
.mac
.type
> e1000_82543
&&
2289 adapter
->hw
.mac
.type
< e1000_82571
) {
2290 /* Figure our where our IO BAR is ? */
2291 for (rid
= PCIR_BAR(0); rid
< PCIR_CARDBUSCIS
;) {
2292 val
= pci_read_config(dev
, rid
, 4);
2293 if (EM_BAR_TYPE(val
) == EM_BAR_TYPE_IO
) {
2294 adapter
->io_rid
= rid
;
2298 /* check for 64bit BAR */
2299 if (EM_BAR_MEM_TYPE(val
) == EM_BAR_MEM_TYPE_64BIT
)
2302 if (rid
>= PCIR_CARDBUSCIS
) {
2303 device_printf(dev
, "Unable to locate IO BAR\n");
2306 adapter
->ioport
= bus_alloc_resource_any(dev
, SYS_RES_IOPORT
,
2307 &adapter
->io_rid
, RF_ACTIVE
);
2308 if (adapter
->ioport
== NULL
) {
2309 device_printf(dev
, "Unable to allocate bus resource: "
2313 adapter
->hw
.io_base
= 0;
2314 adapter
->osdep
.io_bus_space_tag
=
2315 rman_get_bustag(adapter
->ioport
);
2316 adapter
->osdep
.io_bus_space_handle
=
2317 rman_get_bushandle(adapter
->ioport
);
2321 * Don't enable MSI-X on 82574, see:
2322 * 82574 specification update errata #15
2324 * Don't enable MSI on PCI/PCI-X chips, see:
2325 * 82540 specification update errata #6
2326 * 82545 specification update errata #4
2328 * Don't enable MSI on 82571/82572, see:
2329 * 82571/82572 specification update errata #63
2331 * Some versions of I219 only have PCI AF.
2333 msi_enable
= em_msi_enable
;
2335 (!(pci_is_pcie(dev
) ||
2336 pci_find_extcap(dev
, PCIY_PCIAF
, &cap
) == 0) ||
2337 adapter
->hw
.mac
.type
== e1000_82571
||
2338 adapter
->hw
.mac
.type
== e1000_82572
))
2341 adapter
->intr_type
= pci_alloc_1intr(dev
, msi_enable
,
2342 &adapter
->intr_rid
, &intr_flags
);
2344 if (adapter
->intr_type
== PCI_INTR_TYPE_LEGACY
) {
2347 unshared
= device_getenv_int(dev
, "irq.unshared", 0);
2349 adapter
->flags
|= EM_FLAG_SHARED_INTR
;
2351 device_printf(dev
, "IRQ shared\n");
2353 intr_flags
&= ~RF_SHAREABLE
;
2355 device_printf(dev
, "IRQ unshared\n");
2359 adapter
->intr_res
= bus_alloc_resource_any(dev
, SYS_RES_IRQ
,
2360 &adapter
->intr_rid
, intr_flags
);
2361 if (adapter
->intr_res
== NULL
) {
2362 device_printf(dev
, "Unable to allocate bus resource: %s\n",
2363 adapter
->intr_type
== PCI_INTR_TYPE_MSI
?
2364 "MSI" : "legacy intr");
2366 /* Retry with MSI. */
2368 adapter
->flags
&= ~EM_FLAG_SHARED_INTR
;
2374 adapter
->hw
.bus
.pci_cmd_word
= pci_read_config(dev
, PCIR_COMMAND
, 2);
2375 adapter
->hw
.back
= &adapter
->osdep
;
/*
 * Release all PCI resources (IRQ, MSI, memory BAR, flash BAR, I/O
 * BAR) that em_alloc_pci_res() may have acquired.  Safe to call with
 * any subset of the resources unallocated.
 */
static void
em_free_pci_res(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	if (adapter->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    adapter->intr_rid, adapter->intr_res);
	}

	if (adapter->intr_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (adapter->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    adapter->memory_rid, adapter->memory);
	}

	if (adapter->flash != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    adapter->flash_rid, adapter->flash);
	}

	if (adapter->ioport != NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT,
		    adapter->io_rid, adapter->ioport);
	}
}
2409 em_reset(struct adapter
*adapter
)
2411 device_t dev
= adapter
->dev
;
2412 uint16_t rx_buffer_size
;
2415 /* When hardware is reset, fifo_head is also reset */
2416 adapter
->tx_fifo_head
= 0;
2418 /* Set up smart power down as default off on newer adapters. */
2419 if (!em_smart_pwr_down
&&
2420 (adapter
->hw
.mac
.type
== e1000_82571
||
2421 adapter
->hw
.mac
.type
== e1000_82572
)) {
2422 uint16_t phy_tmp
= 0;
2424 /* Speed up time to link by disabling smart power down. */
2425 e1000_read_phy_reg(&adapter
->hw
,
2426 IGP02E1000_PHY_POWER_MGMT
, &phy_tmp
);
2427 phy_tmp
&= ~IGP02E1000_PM_SPD
;
2428 e1000_write_phy_reg(&adapter
->hw
,
2429 IGP02E1000_PHY_POWER_MGMT
, phy_tmp
);
2433 * Packet Buffer Allocation (PBA)
2434 * Writing PBA sets the receive portion of the buffer
2435 * the remainder is used for the transmit buffer.
2437 * Devices before the 82547 had a Packet Buffer of 64K.
2438 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
2439 * After the 82547 the buffer was reduced to 40K.
2440 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
2441 * Note: default does not leave enough room for Jumbo Frame >10k.
2443 switch (adapter
->hw
.mac
.type
) {
2445 case e1000_82547_rev_2
: /* 82547: Total Packet Buffer is 40K */
2446 if (adapter
->hw
.mac
.max_frame_size
> 8192)
2447 pba
= E1000_PBA_22K
; /* 22K for Rx, 18K for Tx */
2449 pba
= E1000_PBA_30K
; /* 30K for Rx, 10K for Tx */
2450 adapter
->tx_fifo_head
= 0;
2451 adapter
->tx_head_addr
= pba
<< EM_TX_HEAD_ADDR_SHIFT
;
2452 adapter
->tx_fifo_size
=
2453 (E1000_PBA_40K
- pba
) << EM_PBA_BYTES_SHIFT
;
2456 /* Total Packet Buffer on these is 48K */
2459 case e1000_80003es2lan
:
2460 pba
= E1000_PBA_32K
; /* 32K for Rx, 16K for Tx */
2463 case e1000_82573
: /* 82573: Total Packet Buffer is 32K */
2464 pba
= E1000_PBA_12K
; /* 12K for Rx, 20K for Tx */
2469 pba
= E1000_PBA_20K
; /* 20K for Rx, 20K for Tx */
2477 case e1000_ich10lan
:
2478 #define E1000_PBA_10K 0x000A
2479 pba
= E1000_PBA_10K
;
2486 pba
= E1000_PBA_26K
;
2490 /* Devices before 82547 had a Packet Buffer of 64K. */
2491 if (adapter
->hw
.mac
.max_frame_size
> 8192)
2492 pba
= E1000_PBA_40K
; /* 40K for Rx, 24K for Tx */
2494 pba
= E1000_PBA_48K
; /* 48K for Rx, 16K for Tx */
2496 E1000_WRITE_REG(&adapter
->hw
, E1000_PBA
, pba
);
2499 * These parameters control the automatic generation (Tx) and
2500 * response (Rx) to Ethernet PAUSE frames.
2501 * - High water mark should allow for at least two frames to be
2502 * received after sending an XOFF.
2503 * - Low water mark works best when it is very near the high water mark.
2504 * This allows the receiver to restart by sending XON when it has
2505 * drained a bit. Here we use an arbitary value of 1500 which will
2506 * restart after one full frame is pulled from the buffer. There
2507 * could be several smaller frames in the buffer and if so they will
2508 * not trigger the XON until their total number reduces the buffer
2510 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2513 (E1000_READ_REG(&adapter
->hw
, E1000_PBA
) & 0xffff) << 10;
2515 adapter
->hw
.fc
.high_water
= rx_buffer_size
-
2516 roundup2(adapter
->hw
.mac
.max_frame_size
, 1024);
2517 adapter
->hw
.fc
.low_water
= adapter
->hw
.fc
.high_water
- 1500;
2519 if (adapter
->hw
.mac
.type
== e1000_80003es2lan
)
2520 adapter
->hw
.fc
.pause_time
= 0xFFFF;
2522 adapter
->hw
.fc
.pause_time
= EM_FC_PAUSE_TIME
;
2524 adapter
->hw
.fc
.send_xon
= TRUE
;
2526 adapter
->hw
.fc
.requested_mode
= e1000_ifmedia2fc(adapter
->ifm_flowctrl
);
2529 * Device specific overrides/settings
2531 switch (adapter
->hw
.mac
.type
) {
2533 KASSERT(adapter
->hw
.fc
.requested_mode
== e1000_fc_rx_pause
||
2534 adapter
->hw
.fc
.requested_mode
== e1000_fc_none
,
2535 ("unsupported flow control on PCH %d",
2536 adapter
->hw
.fc
.requested_mode
));
2537 adapter
->hw
.fc
.pause_time
= 0xFFFF; /* override */
2538 if (adapter
->arpcom
.ac_if
.if_mtu
> ETHERMTU
) {
2539 adapter
->hw
.fc
.high_water
= 0x3500;
2540 adapter
->hw
.fc
.low_water
= 0x1500;
2542 adapter
->hw
.fc
.high_water
= 0x5000;
2543 adapter
->hw
.fc
.low_water
= 0x3000;
2545 adapter
->hw
.fc
.refresh_time
= 0x1000;
2551 adapter
->hw
.fc
.high_water
= 0x5C20;
2552 adapter
->hw
.fc
.low_water
= 0x5048;
2553 adapter
->hw
.fc
.pause_time
= 0x0650;
2554 adapter
->hw
.fc
.refresh_time
= 0x0400;
2555 /* Jumbos need adjusted PBA */
2556 if (adapter
->arpcom
.ac_if
.if_mtu
> ETHERMTU
)
2557 E1000_WRITE_REG(&adapter
->hw
, E1000_PBA
, 12);
2559 E1000_WRITE_REG(&adapter
->hw
, E1000_PBA
, 26);
2563 case e1000_ich10lan
:
2564 if (adapter
->arpcom
.ac_if
.if_mtu
> ETHERMTU
) {
2565 adapter
->hw
.fc
.high_water
= 0x2800;
2566 adapter
->hw
.fc
.low_water
=
2567 adapter
->hw
.fc
.high_water
- 8;
2572 if (adapter
->hw
.mac
.type
== e1000_80003es2lan
)
2573 adapter
->hw
.fc
.pause_time
= 0xFFFF;
2577 /* Issue a global reset */
2578 e1000_reset_hw(&adapter
->hw
);
2579 if (adapter
->hw
.mac
.type
>= e1000_82544
)
2580 E1000_WRITE_REG(&adapter
->hw
, E1000_WUC
, 0);
2581 em_disable_aspm(adapter
);
2583 if (e1000_init_hw(&adapter
->hw
) < 0) {
2584 device_printf(dev
, "Hardware Initialization Failed\n");
2588 E1000_WRITE_REG(&adapter
->hw
, E1000_VET
, ETHERTYPE_VLAN
);
2589 e1000_get_phy_info(&adapter
->hw
);
2590 e1000_check_for_link(&adapter
->hw
);
2596 em_setup_ifp(struct adapter
*adapter
)
2598 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
2600 if_initname(ifp
, device_get_name(adapter
->dev
),
2601 device_get_unit(adapter
->dev
));
2602 ifp
->if_softc
= adapter
;
2603 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
2604 ifp
->if_init
= em_init
;
2605 ifp
->if_ioctl
= em_ioctl
;
2606 ifp
->if_start
= em_start
;
2607 #ifdef IFPOLL_ENABLE
2608 ifp
->if_npoll
= em_npoll
;
2610 ifp
->if_watchdog
= em_watchdog
;
2611 ifp
->if_nmbclusters
= adapter
->num_rx_desc
;
2612 ifq_set_maxlen(&ifp
->if_snd
, adapter
->num_tx_desc
- 1);
2613 ifq_set_ready(&ifp
->if_snd
);
2615 ether_ifattach(ifp
, adapter
->hw
.mac
.addr
, NULL
);
2617 ifp
->if_capabilities
= IFCAP_VLAN_HWTAGGING
| IFCAP_VLAN_MTU
;
2618 if (adapter
->hw
.mac
.type
>= e1000_82543
)
2619 ifp
->if_capabilities
|= IFCAP_HWCSUM
;
2620 if (adapter
->flags
& EM_FLAG_TSO
)
2621 ifp
->if_capabilities
|= IFCAP_TSO
;
2622 ifp
->if_capenable
= ifp
->if_capabilities
;
2624 if (ifp
->if_capenable
& IFCAP_TXCSUM
)
2625 ifp
->if_hwassist
|= EM_CSUM_FEATURES
;
2626 if (ifp
->if_capenable
& IFCAP_TSO
)
2627 ifp
->if_hwassist
|= CSUM_TSO
;
2630 * Tell the upper layer(s) we support long frames.
2632 ifp
->if_data
.ifi_hdrlen
= sizeof(struct ether_vlan_header
);
2635 * Specify the media types supported by this adapter and register
2636 * callbacks to update media and link information
2638 if (adapter
->hw
.phy
.media_type
== e1000_media_type_fiber
||
2639 adapter
->hw
.phy
.media_type
== e1000_media_type_internal_serdes
) {
2640 u_char fiber_type
= IFM_1000_SX
; /* default type */
2642 if (adapter
->hw
.mac
.type
== e1000_82545
)
2643 fiber_type
= IFM_1000_LX
;
2644 ifmedia_add(&adapter
->media
, IFM_ETHER
| fiber_type
| IFM_FDX
,
2647 ifmedia_add(&adapter
->media
, IFM_ETHER
| IFM_10_T
, 0, NULL
);
2648 ifmedia_add(&adapter
->media
, IFM_ETHER
| IFM_10_T
| IFM_FDX
,
2650 ifmedia_add(&adapter
->media
, IFM_ETHER
| IFM_100_TX
,
2652 ifmedia_add(&adapter
->media
, IFM_ETHER
| IFM_100_TX
| IFM_FDX
,
2654 if (adapter
->hw
.phy
.type
!= e1000_phy_ife
) {
2655 ifmedia_add(&adapter
->media
,
2656 IFM_ETHER
| IFM_1000_T
| IFM_FDX
, 0, NULL
);
2659 ifmedia_add(&adapter
->media
, IFM_ETHER
| IFM_AUTO
, 0, NULL
);
2660 ifmedia_set(&adapter
->media
, IFM_ETHER
| IFM_AUTO
|
2661 adapter
->ifm_flowctrl
);
2666 * Workaround for SmartSpeed on 82541 and 82547 controllers
2669 em_smartspeed(struct adapter
*adapter
)
2673 if (adapter
->link_active
|| adapter
->hw
.phy
.type
!= e1000_phy_igp
||
2674 adapter
->hw
.mac
.autoneg
== 0 ||
2675 (adapter
->hw
.phy
.autoneg_advertised
& ADVERTISE_1000_FULL
) == 0)
2678 if (adapter
->smartspeed
== 0) {
2680 * If Master/Slave config fault is asserted twice,
2681 * we assume back-to-back
2683 e1000_read_phy_reg(&adapter
->hw
, PHY_1000T_STATUS
, &phy_tmp
);
2684 if (!(phy_tmp
& SR_1000T_MS_CONFIG_FAULT
))
2686 e1000_read_phy_reg(&adapter
->hw
, PHY_1000T_STATUS
, &phy_tmp
);
2687 if (phy_tmp
& SR_1000T_MS_CONFIG_FAULT
) {
2688 e1000_read_phy_reg(&adapter
->hw
,
2689 PHY_1000T_CTRL
, &phy_tmp
);
2690 if (phy_tmp
& CR_1000T_MS_ENABLE
) {
2691 phy_tmp
&= ~CR_1000T_MS_ENABLE
;
2692 e1000_write_phy_reg(&adapter
->hw
,
2693 PHY_1000T_CTRL
, phy_tmp
);
2694 adapter
->smartspeed
++;
2695 if (adapter
->hw
.mac
.autoneg
&&
2696 !e1000_phy_setup_autoneg(&adapter
->hw
) &&
2697 !e1000_read_phy_reg(&adapter
->hw
,
2698 PHY_CONTROL
, &phy_tmp
)) {
2699 phy_tmp
|= MII_CR_AUTO_NEG_EN
|
2700 MII_CR_RESTART_AUTO_NEG
;
2701 e1000_write_phy_reg(&adapter
->hw
,
2702 PHY_CONTROL
, phy_tmp
);
2707 } else if (adapter
->smartspeed
== EM_SMARTSPEED_DOWNSHIFT
) {
2708 /* If still no link, perhaps using 2/3 pair cable */
2709 e1000_read_phy_reg(&adapter
->hw
, PHY_1000T_CTRL
, &phy_tmp
);
2710 phy_tmp
|= CR_1000T_MS_ENABLE
;
2711 e1000_write_phy_reg(&adapter
->hw
, PHY_1000T_CTRL
, phy_tmp
);
2712 if (adapter
->hw
.mac
.autoneg
&&
2713 !e1000_phy_setup_autoneg(&adapter
->hw
) &&
2714 !e1000_read_phy_reg(&adapter
->hw
, PHY_CONTROL
, &phy_tmp
)) {
2715 phy_tmp
|= MII_CR_AUTO_NEG_EN
| MII_CR_RESTART_AUTO_NEG
;
2716 e1000_write_phy_reg(&adapter
->hw
, PHY_CONTROL
, phy_tmp
);
2720 /* Restart process after EM_SMARTSPEED_MAX iterations */
2721 if (adapter
->smartspeed
++ == EM_SMARTSPEED_MAX
)
2722 adapter
->smartspeed
= 0;
2726 em_dma_malloc(struct adapter
*adapter
, bus_size_t size
,
2727 struct em_dma_alloc
*dma
)
2729 dma
->dma_vaddr
= bus_dmamem_coherent_any(adapter
->parent_dtag
,
2730 EM_DBA_ALIGN
, size
, BUS_DMA_WAITOK
,
2731 &dma
->dma_tag
, &dma
->dma_map
,
2733 if (dma
->dma_vaddr
== NULL
)
2740 em_dma_free(struct adapter
*adapter
, struct em_dma_alloc
*dma
)
2742 if (dma
->dma_tag
== NULL
)
2744 bus_dmamap_unload(dma
->dma_tag
, dma
->dma_map
);
2745 bus_dmamem_free(dma
->dma_tag
, dma
->dma_vaddr
, dma
->dma_map
);
2746 bus_dma_tag_destroy(dma
->dma_tag
);
2750 em_create_tx_ring(struct adapter
*adapter
)
2752 device_t dev
= adapter
->dev
;
2753 struct em_buffer
*tx_buffer
;
2756 adapter
->tx_buffer_area
=
2757 kmalloc(sizeof(struct em_buffer
) * adapter
->num_tx_desc
,
2758 M_DEVBUF
, M_WAITOK
| M_ZERO
);
2761 * Create DMA tags for tx buffers
2763 error
= bus_dma_tag_create(adapter
->parent_dtag
, /* parent */
2764 1, 0, /* alignment, bounds */
2765 BUS_SPACE_MAXADDR
, /* lowaddr */
2766 BUS_SPACE_MAXADDR
, /* highaddr */
2767 NULL
, NULL
, /* filter, filterarg */
2768 EM_TSO_SIZE
, /* maxsize */
2769 EM_MAX_SCATTER
, /* nsegments */
2770 PAGE_SIZE
, /* maxsegsize */
2771 BUS_DMA_WAITOK
| BUS_DMA_ALLOCNOW
|
2772 BUS_DMA_ONEBPAGE
, /* flags */
2775 device_printf(dev
, "Unable to allocate TX DMA tag\n");
2776 kfree(adapter
->tx_buffer_area
, M_DEVBUF
);
2777 adapter
->tx_buffer_area
= NULL
;
2782 * Create DMA maps for tx buffers
2784 for (i
= 0; i
< adapter
->num_tx_desc
; i
++) {
2785 tx_buffer
= &adapter
->tx_buffer_area
[i
];
2787 error
= bus_dmamap_create(adapter
->txtag
,
2788 BUS_DMA_WAITOK
| BUS_DMA_ONEBPAGE
,
2791 device_printf(dev
, "Unable to create TX DMA map\n");
2792 em_destroy_tx_ring(adapter
, i
);
2800 em_init_tx_ring(struct adapter
*adapter
)
2802 /* Clear the old ring contents */
2803 bzero(adapter
->tx_desc_base
,
2804 (sizeof(struct e1000_tx_desc
)) * adapter
->num_tx_desc
);
2807 adapter
->next_avail_tx_desc
= 0;
2808 adapter
->next_tx_to_clean
= 0;
2809 adapter
->num_tx_desc_avail
= adapter
->num_tx_desc
;
2813 em_init_tx_unit(struct adapter
*adapter
)
2815 uint32_t tctl
, tarc
, tipg
= 0;
2818 /* Setup the Base and Length of the Tx Descriptor Ring */
2819 bus_addr
= adapter
->txdma
.dma_paddr
;
2820 E1000_WRITE_REG(&adapter
->hw
, E1000_TDLEN(0),
2821 adapter
->num_tx_desc
* sizeof(struct e1000_tx_desc
));
2822 E1000_WRITE_REG(&adapter
->hw
, E1000_TDBAH(0),
2823 (uint32_t)(bus_addr
>> 32));
2824 E1000_WRITE_REG(&adapter
->hw
, E1000_TDBAL(0),
2825 (uint32_t)bus_addr
);
2826 /* Setup the HW Tx Head and Tail descriptor pointers */
2827 E1000_WRITE_REG(&adapter
->hw
, E1000_TDT(0), 0);
2828 E1000_WRITE_REG(&adapter
->hw
, E1000_TDH(0), 0);
2830 /* Set the default values for the Tx Inter Packet Gap timer */
2831 switch (adapter
->hw
.mac
.type
) {
2833 tipg
= DEFAULT_82542_TIPG_IPGT
;
2834 tipg
|= DEFAULT_82542_TIPG_IPGR1
<< E1000_TIPG_IPGR1_SHIFT
;
2835 tipg
|= DEFAULT_82542_TIPG_IPGR2
<< E1000_TIPG_IPGR2_SHIFT
;
2838 case e1000_80003es2lan
:
2839 tipg
= DEFAULT_82543_TIPG_IPGR1
;
2840 tipg
|= DEFAULT_80003ES2LAN_TIPG_IPGR2
<<
2841 E1000_TIPG_IPGR2_SHIFT
;
2845 if (adapter
->hw
.phy
.media_type
== e1000_media_type_fiber
||
2846 adapter
->hw
.phy
.media_type
==
2847 e1000_media_type_internal_serdes
)
2848 tipg
= DEFAULT_82543_TIPG_IPGT_FIBER
;
2850 tipg
= DEFAULT_82543_TIPG_IPGT_COPPER
;
2851 tipg
|= DEFAULT_82543_TIPG_IPGR1
<< E1000_TIPG_IPGR1_SHIFT
;
2852 tipg
|= DEFAULT_82543_TIPG_IPGR2
<< E1000_TIPG_IPGR2_SHIFT
;
2856 E1000_WRITE_REG(&adapter
->hw
, E1000_TIPG
, tipg
);
2858 /* NOTE: 0 is not allowed for TIDV */
2859 E1000_WRITE_REG(&adapter
->hw
, E1000_TIDV
, 1);
2860 if(adapter
->hw
.mac
.type
>= e1000_82540
)
2861 E1000_WRITE_REG(&adapter
->hw
, E1000_TADV
, 0);
2863 if (adapter
->hw
.mac
.type
== e1000_82571
||
2864 adapter
->hw
.mac
.type
== e1000_82572
) {
2865 tarc
= E1000_READ_REG(&adapter
->hw
, E1000_TARC(0));
2866 tarc
|= SPEED_MODE_BIT
;
2867 E1000_WRITE_REG(&adapter
->hw
, E1000_TARC(0), tarc
);
2868 } else if (adapter
->hw
.mac
.type
== e1000_80003es2lan
) {
2869 tarc
= E1000_READ_REG(&adapter
->hw
, E1000_TARC(0));
2871 E1000_WRITE_REG(&adapter
->hw
, E1000_TARC(0), tarc
);
2872 tarc
= E1000_READ_REG(&adapter
->hw
, E1000_TARC(1));
2874 E1000_WRITE_REG(&adapter
->hw
, E1000_TARC(1), tarc
);
2877 /* Program the Transmit Control Register */
2878 tctl
= E1000_READ_REG(&adapter
->hw
, E1000_TCTL
);
2879 tctl
&= ~E1000_TCTL_CT
;
2880 tctl
|= E1000_TCTL_PSP
| E1000_TCTL_RTLC
| E1000_TCTL_EN
|
2881 (E1000_COLLISION_THRESHOLD
<< E1000_CT_SHIFT
);
2883 if (adapter
->hw
.mac
.type
>= e1000_82571
)
2884 tctl
|= E1000_TCTL_MULR
;
2886 /* This write will effectively turn on the transmit unit. */
2887 E1000_WRITE_REG(&adapter
->hw
, E1000_TCTL
, tctl
);
2889 if (adapter
->hw
.mac
.type
== e1000_82571
||
2890 adapter
->hw
.mac
.type
== e1000_82572
||
2891 adapter
->hw
.mac
.type
== e1000_80003es2lan
) {
2892 /* Bit 28 of TARC1 must be cleared when MULR is enabled */
2893 tarc
= E1000_READ_REG(&adapter
->hw
, E1000_TARC(1));
2895 E1000_WRITE_REG(&adapter
->hw
, E1000_TARC(1), tarc
);
2900 em_destroy_tx_ring(struct adapter
*adapter
, int ndesc
)
2902 struct em_buffer
*tx_buffer
;
2905 if (adapter
->tx_buffer_area
== NULL
)
2908 for (i
= 0; i
< ndesc
; i
++) {
2909 tx_buffer
= &adapter
->tx_buffer_area
[i
];
2911 KKASSERT(tx_buffer
->m_head
== NULL
);
2912 bus_dmamap_destroy(adapter
->txtag
, tx_buffer
->map
);
2914 bus_dma_tag_destroy(adapter
->txtag
);
2916 kfree(adapter
->tx_buffer_area
, M_DEVBUF
);
2917 adapter
->tx_buffer_area
= NULL
;
2921 * The offload context needs to be set when we transfer the first
2922 * packet of a particular protocol (TCP/UDP). This routine has been
2923 * enhanced to deal with inserted VLAN headers.
2925 * If the new packet's ether header length, ip header length and
2926 * csum offloading type are same as the previous packet, we should
2927 * avoid allocating a new csum context descriptor; mainly to take
2928 * advantage of the pipeline effect of the TX data read request.
2930 * This function returns number of TX descrptors allocated for
2934 em_txcsum(struct adapter
*adapter
, struct mbuf
*mp
,
2935 uint32_t *txd_upper
, uint32_t *txd_lower
)
2937 struct e1000_context_desc
*TXD
;
2938 int curr_txd
, ehdrlen
, csum_flags
;
2939 uint32_t cmd
, hdr_len
, ip_hlen
;
2941 csum_flags
= mp
->m_pkthdr
.csum_flags
& EM_CSUM_FEATURES
;
2942 ip_hlen
= mp
->m_pkthdr
.csum_iphlen
;
2943 ehdrlen
= mp
->m_pkthdr
.csum_lhlen
;
2945 if (adapter
->csum_lhlen
== ehdrlen
&&
2946 adapter
->csum_iphlen
== ip_hlen
&&
2947 adapter
->csum_flags
== csum_flags
) {
2949 * Same csum offload context as the previous packets;
2952 *txd_upper
= adapter
->csum_txd_upper
;
2953 *txd_lower
= adapter
->csum_txd_lower
;
2958 * Setup a new csum offload context.
2961 curr_txd
= adapter
->next_avail_tx_desc
;
2962 TXD
= (struct e1000_context_desc
*)&adapter
->tx_desc_base
[curr_txd
];
2966 /* Setup of IP header checksum. */
2967 if (csum_flags
& CSUM_IP
) {
2969 * Start offset for header checksum calculation.
2970 * End offset for header checksum calculation.
2971 * Offset of place to put the checksum.
2973 TXD
->lower_setup
.ip_fields
.ipcss
= ehdrlen
;
2974 TXD
->lower_setup
.ip_fields
.ipcse
=
2975 htole16(ehdrlen
+ ip_hlen
- 1);
2976 TXD
->lower_setup
.ip_fields
.ipcso
=
2977 ehdrlen
+ offsetof(struct ip
, ip_sum
);
2978 cmd
|= E1000_TXD_CMD_IP
;
2979 *txd_upper
|= E1000_TXD_POPTS_IXSM
<< 8;
2981 hdr_len
= ehdrlen
+ ip_hlen
;
2983 if (csum_flags
& CSUM_TCP
) {
2985 * Start offset for payload checksum calculation.
2986 * End offset for payload checksum calculation.
2987 * Offset of place to put the checksum.
2989 TXD
->upper_setup
.tcp_fields
.tucss
= hdr_len
;
2990 TXD
->upper_setup
.tcp_fields
.tucse
= htole16(0);
2991 TXD
->upper_setup
.tcp_fields
.tucso
=
2992 hdr_len
+ offsetof(struct tcphdr
, th_sum
);
2993 cmd
|= E1000_TXD_CMD_TCP
;
2994 *txd_upper
|= E1000_TXD_POPTS_TXSM
<< 8;
2995 } else if (csum_flags
& CSUM_UDP
) {
2997 * Start offset for header checksum calculation.
2998 * End offset for header checksum calculation.
2999 * Offset of place to put the checksum.
3001 TXD
->upper_setup
.tcp_fields
.tucss
= hdr_len
;
3002 TXD
->upper_setup
.tcp_fields
.tucse
= htole16(0);
3003 TXD
->upper_setup
.tcp_fields
.tucso
=
3004 hdr_len
+ offsetof(struct udphdr
, uh_sum
);
3005 *txd_upper
|= E1000_TXD_POPTS_TXSM
<< 8;
3008 *txd_lower
= E1000_TXD_CMD_DEXT
| /* Extended descr type */
3009 E1000_TXD_DTYP_D
; /* Data descr */
3011 /* Save the information for this csum offloading context */
3012 adapter
->csum_lhlen
= ehdrlen
;
3013 adapter
->csum_iphlen
= ip_hlen
;
3014 adapter
->csum_flags
= csum_flags
;
3015 adapter
->csum_txd_upper
= *txd_upper
;
3016 adapter
->csum_txd_lower
= *txd_lower
;
3018 TXD
->tcp_seg_setup
.data
= htole32(0);
3019 TXD
->cmd_and_length
=
3020 htole32(E1000_TXD_CMD_IFCS
| E1000_TXD_CMD_DEXT
| cmd
);
3022 if (++curr_txd
== adapter
->num_tx_desc
)
3025 KKASSERT(adapter
->num_tx_desc_avail
> 0);
3026 adapter
->num_tx_desc_avail
--;
3028 adapter
->next_avail_tx_desc
= curr_txd
;
3033 em_txeof(struct adapter
*adapter
)
3035 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
3036 struct em_buffer
*tx_buffer
;
3037 int first
, num_avail
;
3039 if (adapter
->tx_dd_head
== adapter
->tx_dd_tail
)
3042 if (adapter
->num_tx_desc_avail
== adapter
->num_tx_desc
)
3045 num_avail
= adapter
->num_tx_desc_avail
;
3046 first
= adapter
->next_tx_to_clean
;
3048 while (adapter
->tx_dd_head
!= adapter
->tx_dd_tail
) {
3049 struct e1000_tx_desc
*tx_desc
;
3050 int dd_idx
= adapter
->tx_dd
[adapter
->tx_dd_head
];
3052 tx_desc
= &adapter
->tx_desc_base
[dd_idx
];
3053 if (tx_desc
->upper
.fields
.status
& E1000_TXD_STAT_DD
) {
3054 EM_INC_TXDD_IDX(adapter
->tx_dd_head
);
3056 if (++dd_idx
== adapter
->num_tx_desc
)
3059 while (first
!= dd_idx
) {
3064 tx_buffer
= &adapter
->tx_buffer_area
[first
];
3065 if (tx_buffer
->m_head
) {
3066 bus_dmamap_unload(adapter
->txtag
,
3068 m_freem(tx_buffer
->m_head
);
3069 tx_buffer
->m_head
= NULL
;
3072 if (++first
== adapter
->num_tx_desc
)
3079 adapter
->next_tx_to_clean
= first
;
3080 adapter
->num_tx_desc_avail
= num_avail
;
3082 if (adapter
->tx_dd_head
== adapter
->tx_dd_tail
) {
3083 adapter
->tx_dd_head
= 0;
3084 adapter
->tx_dd_tail
= 0;
3087 if (!EM_IS_OACTIVE(adapter
)) {
3088 ifq_clr_oactive(&ifp
->if_snd
);
3090 /* All clean, turn off the timer */
3091 if (adapter
->num_tx_desc_avail
== adapter
->num_tx_desc
)
3097 em_tx_collect(struct adapter
*adapter
)
3099 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
3100 struct em_buffer
*tx_buffer
;
3101 int tdh
, first
, num_avail
, dd_idx
= -1;
3103 if (adapter
->num_tx_desc_avail
== adapter
->num_tx_desc
)
3106 tdh
= E1000_READ_REG(&adapter
->hw
, E1000_TDH(0));
3107 if (tdh
== adapter
->next_tx_to_clean
)
3110 if (adapter
->tx_dd_head
!= adapter
->tx_dd_tail
)
3111 dd_idx
= adapter
->tx_dd
[adapter
->tx_dd_head
];
3113 num_avail
= adapter
->num_tx_desc_avail
;
3114 first
= adapter
->next_tx_to_clean
;
3116 while (first
!= tdh
) {
3121 tx_buffer
= &adapter
->tx_buffer_area
[first
];
3122 if (tx_buffer
->m_head
) {
3123 bus_dmamap_unload(adapter
->txtag
,
3125 m_freem(tx_buffer
->m_head
);
3126 tx_buffer
->m_head
= NULL
;
3129 if (first
== dd_idx
) {
3130 EM_INC_TXDD_IDX(adapter
->tx_dd_head
);
3131 if (adapter
->tx_dd_head
== adapter
->tx_dd_tail
) {
3132 adapter
->tx_dd_head
= 0;
3133 adapter
->tx_dd_tail
= 0;
3136 dd_idx
= adapter
->tx_dd
[adapter
->tx_dd_head
];
3140 if (++first
== adapter
->num_tx_desc
)
3143 adapter
->next_tx_to_clean
= first
;
3144 adapter
->num_tx_desc_avail
= num_avail
;
3146 if (!EM_IS_OACTIVE(adapter
)) {
3147 ifq_clr_oactive(&ifp
->if_snd
);
3149 /* All clean, turn off the timer */
3150 if (adapter
->num_tx_desc_avail
== adapter
->num_tx_desc
)
3156 * When Link is lost sometimes there is work still in the TX ring
3157 * which will result in a watchdog, rather than allow that do an
3158 * attempted cleanup and then reinit here. Note that this has been
3159 * seens mostly with fiber adapters.
3162 em_tx_purge(struct adapter
*adapter
)
3164 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
3166 if (!adapter
->link_active
&& ifp
->if_timer
) {
3167 em_tx_collect(adapter
);
3168 if (ifp
->if_timer
) {
3169 if_printf(ifp
, "Link lost, TX pending, reinit\n");
3177 em_newbuf(struct adapter
*adapter
, int i
, int init
)
3180 bus_dma_segment_t seg
;
3182 struct em_buffer
*rx_buffer
;
3185 m
= m_getcl(init
? M_WAITOK
: M_NOWAIT
, MT_DATA
, M_PKTHDR
);
3187 adapter
->mbuf_cluster_failed
++;
3189 if_printf(&adapter
->arpcom
.ac_if
,
3190 "Unable to allocate RX mbuf\n");
3194 m
->m_len
= m
->m_pkthdr
.len
= MCLBYTES
;
3196 if (adapter
->hw
.mac
.max_frame_size
<= MCLBYTES
- ETHER_ALIGN
)
3197 m_adj(m
, ETHER_ALIGN
);
3199 error
= bus_dmamap_load_mbuf_segment(adapter
->rxtag
,
3200 adapter
->rx_sparemap
, m
,
3201 &seg
, 1, &nseg
, BUS_DMA_NOWAIT
);
3205 if_printf(&adapter
->arpcom
.ac_if
,
3206 "Unable to load RX mbuf\n");
3211 rx_buffer
= &adapter
->rx_buffer_area
[i
];
3212 if (rx_buffer
->m_head
!= NULL
)
3213 bus_dmamap_unload(adapter
->rxtag
, rx_buffer
->map
);
3215 map
= rx_buffer
->map
;
3216 rx_buffer
->map
= adapter
->rx_sparemap
;
3217 adapter
->rx_sparemap
= map
;
3219 rx_buffer
->m_head
= m
;
3221 adapter
->rx_desc_base
[i
].buffer_addr
= htole64(seg
.ds_addr
);
3226 em_create_rx_ring(struct adapter
*adapter
)
3228 device_t dev
= adapter
->dev
;
3229 struct em_buffer
*rx_buffer
;
3232 adapter
->rx_buffer_area
=
3233 kmalloc(sizeof(struct em_buffer
) * adapter
->num_rx_desc
,
3234 M_DEVBUF
, M_WAITOK
| M_ZERO
);
3237 * Create DMA tag for rx buffers
3239 error
= bus_dma_tag_create(adapter
->parent_dtag
, /* parent */
3240 1, 0, /* alignment, bounds */
3241 BUS_SPACE_MAXADDR
, /* lowaddr */
3242 BUS_SPACE_MAXADDR
, /* highaddr */
3243 NULL
, NULL
, /* filter, filterarg */
3244 MCLBYTES
, /* maxsize */
3246 MCLBYTES
, /* maxsegsize */
3247 BUS_DMA_WAITOK
| BUS_DMA_ALLOCNOW
, /* flags */
3250 device_printf(dev
, "Unable to allocate RX DMA tag\n");
3251 kfree(adapter
->rx_buffer_area
, M_DEVBUF
);
3252 adapter
->rx_buffer_area
= NULL
;
3257 * Create spare DMA map for rx buffers
3259 error
= bus_dmamap_create(adapter
->rxtag
, BUS_DMA_WAITOK
,
3260 &adapter
->rx_sparemap
);
3262 device_printf(dev
, "Unable to create spare RX DMA map\n");
3263 bus_dma_tag_destroy(adapter
->rxtag
);
3264 kfree(adapter
->rx_buffer_area
, M_DEVBUF
);
3265 adapter
->rx_buffer_area
= NULL
;
3270 * Create DMA maps for rx buffers
3272 for (i
= 0; i
< adapter
->num_rx_desc
; i
++) {
3273 rx_buffer
= &adapter
->rx_buffer_area
[i
];
3275 error
= bus_dmamap_create(adapter
->rxtag
, BUS_DMA_WAITOK
,
3278 device_printf(dev
, "Unable to create RX DMA map\n");
3279 em_destroy_rx_ring(adapter
, i
);
3287 em_init_rx_ring(struct adapter
*adapter
)
3291 /* Reset descriptor ring */
3292 bzero(adapter
->rx_desc_base
,
3293 (sizeof(struct e1000_rx_desc
)) * adapter
->num_rx_desc
);
3295 /* Allocate new ones. */
3296 for (i
= 0; i
< adapter
->num_rx_desc
; i
++) {
3297 error
= em_newbuf(adapter
, i
, 1);
3302 /* Setup our descriptor pointers */
3303 adapter
->next_rx_desc_to_check
= 0;
3309 em_init_rx_unit(struct adapter
*adapter
)
3311 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
3316 * Make sure receives are disabled while setting
3317 * up the descriptor ring
3319 rctl
= E1000_READ_REG(&adapter
->hw
, E1000_RCTL
);
3320 E1000_WRITE_REG(&adapter
->hw
, E1000_RCTL
, rctl
& ~E1000_RCTL_EN
);
3322 if (adapter
->hw
.mac
.type
>= e1000_82540
) {
3326 * Set the interrupt throttling rate. Value is calculated
3327 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
3329 if (adapter
->int_throttle_ceil
)
3330 itr
= 1000000000 / 256 / adapter
->int_throttle_ceil
;
3333 em_set_itr(adapter
, itr
);
3336 /* Disable accelerated ackknowledge */
3337 if (adapter
->hw
.mac
.type
== e1000_82574
) {
3338 E1000_WRITE_REG(&adapter
->hw
,
3339 E1000_RFCTL
, E1000_RFCTL_ACK_DIS
);
3342 /* Receive Checksum Offload for TCP and UDP */
3343 if (ifp
->if_capenable
& IFCAP_RXCSUM
) {
3346 rxcsum
= E1000_READ_REG(&adapter
->hw
, E1000_RXCSUM
);
3347 rxcsum
|= (E1000_RXCSUM_IPOFL
| E1000_RXCSUM_TUOFL
);
3348 E1000_WRITE_REG(&adapter
->hw
, E1000_RXCSUM
, rxcsum
);
3352 * XXX TEMPORARY WORKAROUND: on some systems with 82573
3353 * long latencies are observed, like Lenovo X60. This
3354 * change eliminates the problem, but since having positive
3355 * values in RDTR is a known source of problems on other
3356 * platforms another solution is being sought.
3358 if (em_82573_workaround
&& adapter
->hw
.mac
.type
== e1000_82573
) {
3359 E1000_WRITE_REG(&adapter
->hw
, E1000_RADV
, EM_RADV_82573
);
3360 E1000_WRITE_REG(&adapter
->hw
, E1000_RDTR
, EM_RDTR_82573
);
3364 * Setup the Base and Length of the Rx Descriptor Ring
3366 bus_addr
= adapter
->rxdma
.dma_paddr
;
3367 E1000_WRITE_REG(&adapter
->hw
, E1000_RDLEN(0),
3368 adapter
->num_rx_desc
* sizeof(struct e1000_rx_desc
));
3369 E1000_WRITE_REG(&adapter
->hw
, E1000_RDBAH(0),
3370 (uint32_t)(bus_addr
>> 32));
3371 E1000_WRITE_REG(&adapter
->hw
, E1000_RDBAL(0),
3372 (uint32_t)bus_addr
);
3375 * Setup the HW Rx Head and Tail Descriptor Pointers
3377 E1000_WRITE_REG(&adapter
->hw
, E1000_RDH(0), 0);
3378 E1000_WRITE_REG(&adapter
->hw
, E1000_RDT(0), adapter
->num_rx_desc
- 1);
3380 /* Set PTHRESH for improved jumbo performance */
3381 if (((adapter
->hw
.mac
.type
== e1000_ich9lan
) ||
3382 (adapter
->hw
.mac
.type
== e1000_pch2lan
) ||
3383 (adapter
->hw
.mac
.type
== e1000_ich10lan
)) &&
3384 (ifp
->if_mtu
> ETHERMTU
)) {
3387 rxdctl
= E1000_READ_REG(&adapter
->hw
, E1000_RXDCTL(0));
3388 E1000_WRITE_REG(&adapter
->hw
, E1000_RXDCTL(0), rxdctl
| 3);
3391 if (adapter
->hw
.mac
.type
>= e1000_pch2lan
) {
3392 if (ifp
->if_mtu
> ETHERMTU
)
3393 e1000_lv_jumbo_workaround_ich8lan(&adapter
->hw
, TRUE
);
3395 e1000_lv_jumbo_workaround_ich8lan(&adapter
->hw
, FALSE
);
3398 /* Setup the Receive Control Register */
3399 rctl
&= ~(3 << E1000_RCTL_MO_SHIFT
);
3400 rctl
|= E1000_RCTL_EN
| E1000_RCTL_BAM
| E1000_RCTL_LBM_NO
|
3401 E1000_RCTL_RDMTS_HALF
|
3402 (adapter
->hw
.mac
.mc_filter_type
<< E1000_RCTL_MO_SHIFT
);
3404 /* Make sure VLAN Filters are off */
3405 rctl
&= ~E1000_RCTL_VFE
;
3407 if (e1000_tbi_sbp_enabled_82543(&adapter
->hw
))
3408 rctl
|= E1000_RCTL_SBP
;
3410 rctl
&= ~E1000_RCTL_SBP
;
3412 switch (adapter
->rx_buffer_len
) {
3415 rctl
|= E1000_RCTL_SZ_2048
;
3419 rctl
|= E1000_RCTL_SZ_4096
|
3420 E1000_RCTL_BSEX
| E1000_RCTL_LPE
;
3424 rctl
|= E1000_RCTL_SZ_8192
|
3425 E1000_RCTL_BSEX
| E1000_RCTL_LPE
;
3429 rctl
|= E1000_RCTL_SZ_16384
|
3430 E1000_RCTL_BSEX
| E1000_RCTL_LPE
;
3434 if (ifp
->if_mtu
> ETHERMTU
)
3435 rctl
|= E1000_RCTL_LPE
;
3437 rctl
&= ~E1000_RCTL_LPE
;
3439 /* Enable Receives */
3440 E1000_WRITE_REG(&adapter
->hw
, E1000_RCTL
, rctl
);
3444 em_destroy_rx_ring(struct adapter
*adapter
, int ndesc
)
3446 struct em_buffer
*rx_buffer
;
3449 if (adapter
->rx_buffer_area
== NULL
)
3452 for (i
= 0; i
< ndesc
; i
++) {
3453 rx_buffer
= &adapter
->rx_buffer_area
[i
];
3455 KKASSERT(rx_buffer
->m_head
== NULL
);
3456 bus_dmamap_destroy(adapter
->rxtag
, rx_buffer
->map
);
3458 bus_dmamap_destroy(adapter
->rxtag
, adapter
->rx_sparemap
);
3459 bus_dma_tag_destroy(adapter
->rxtag
);
3461 kfree(adapter
->rx_buffer_area
, M_DEVBUF
);
3462 adapter
->rx_buffer_area
= NULL
;
3466 em_rxeof(struct adapter
*adapter
, int count
)
3468 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
3469 uint8_t status
, accept_frame
= 0, eop
= 0;
3470 uint16_t len
, desc_len
, prev_len_adj
;
3471 struct e1000_rx_desc
*current_desc
;
3475 i
= adapter
->next_rx_desc_to_check
;
3476 current_desc
= &adapter
->rx_desc_base
[i
];
3478 if (!(current_desc
->status
& E1000_RXD_STAT_DD
))
3481 while ((current_desc
->status
& E1000_RXD_STAT_DD
) && count
!= 0) {
3482 struct mbuf
*m
= NULL
;
3486 mp
= adapter
->rx_buffer_area
[i
].m_head
;
3489 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3490 * needs to access the last received byte in the mbuf.
3492 bus_dmamap_sync(adapter
->rxtag
, adapter
->rx_buffer_area
[i
].map
,
3493 BUS_DMASYNC_POSTREAD
);
3497 desc_len
= le16toh(current_desc
->length
);
3498 status
= current_desc
->status
;
3499 if (status
& E1000_RXD_STAT_EOP
) {
3502 if (desc_len
< ETHER_CRC_LEN
) {
3504 prev_len_adj
= ETHER_CRC_LEN
- desc_len
;
3506 len
= desc_len
- ETHER_CRC_LEN
;
3513 if (current_desc
->errors
& E1000_RXD_ERR_FRAME_ERR_MASK
) {
3515 uint32_t pkt_len
= desc_len
;
3517 if (adapter
->fmp
!= NULL
)
3518 pkt_len
+= adapter
->fmp
->m_pkthdr
.len
;
3520 last_byte
= *(mtod(mp
, caddr_t
) + desc_len
- 1);
3521 if (TBI_ACCEPT(&adapter
->hw
, status
,
3522 current_desc
->errors
, pkt_len
, last_byte
,
3523 adapter
->min_frame_size
,
3524 adapter
->hw
.mac
.max_frame_size
)) {
3525 e1000_tbi_adjust_stats_82543(&adapter
->hw
,
3526 &adapter
->stats
, pkt_len
,
3527 adapter
->hw
.mac
.addr
,
3528 adapter
->hw
.mac
.max_frame_size
);
3537 if (em_newbuf(adapter
, i
, 0) != 0) {
3538 IFNET_STAT_INC(ifp
, iqdrops
, 1);
3542 /* Assign correct length to the current fragment */
3545 if (adapter
->fmp
== NULL
) {
3546 mp
->m_pkthdr
.len
= len
;
3547 adapter
->fmp
= mp
; /* Store the first mbuf */
3551 * Chain mbuf's together
3555 * Adjust length of previous mbuf in chain if
3556 * we received less than 4 bytes in the last
3559 if (prev_len_adj
> 0) {
3560 adapter
->lmp
->m_len
-= prev_len_adj
;
3561 adapter
->fmp
->m_pkthdr
.len
-=
3564 adapter
->lmp
->m_next
= mp
;
3565 adapter
->lmp
= adapter
->lmp
->m_next
;
3566 adapter
->fmp
->m_pkthdr
.len
+= len
;
3570 adapter
->fmp
->m_pkthdr
.rcvif
= ifp
;
3571 IFNET_STAT_INC(ifp
, ipackets
, 1);
3573 if (ifp
->if_capenable
& IFCAP_RXCSUM
) {
3574 em_rxcsum(adapter
, current_desc
,
3578 if (status
& E1000_RXD_STAT_VP
) {
3579 adapter
->fmp
->m_pkthdr
.ether_vlantag
=
3580 (le16toh(current_desc
->special
) &
3581 E1000_RXD_SPC_VLAN_MASK
);
3582 adapter
->fmp
->m_flags
|= M_VLANTAG
;
3585 adapter
->fmp
= NULL
;
3586 adapter
->lmp
= NULL
;
3589 IFNET_STAT_INC(ifp
, ierrors
, 1);
3592 /* Reuse loaded DMA map and just update mbuf chain */
3593 mp
= adapter
->rx_buffer_area
[i
].m_head
;
3594 mp
->m_len
= mp
->m_pkthdr
.len
= MCLBYTES
;
3595 mp
->m_data
= mp
->m_ext
.ext_buf
;
3597 if (adapter
->hw
.mac
.max_frame_size
<=
3598 (MCLBYTES
- ETHER_ALIGN
))
3599 m_adj(mp
, ETHER_ALIGN
);
3601 if (adapter
->fmp
!= NULL
) {
3602 m_freem(adapter
->fmp
);
3603 adapter
->fmp
= NULL
;
3604 adapter
->lmp
= NULL
;
3609 /* Zero out the receive descriptors status. */
3610 current_desc
->status
= 0;
3613 ifp
->if_input(ifp
, m
, NULL
, -1);
3615 /* Advance our pointers to the next descriptor. */
3616 if (++i
== adapter
->num_rx_desc
)
3618 current_desc
= &adapter
->rx_desc_base
[i
];
3620 adapter
->next_rx_desc_to_check
= i
;
3622 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
3624 i
= adapter
->num_rx_desc
- 1;
3625 E1000_WRITE_REG(&adapter
->hw
, E1000_RDT(0), i
);
3629 em_rxcsum(struct adapter
*adapter
, struct e1000_rx_desc
*rx_desc
,
3632 /* 82543 or newer only */
3633 if (adapter
->hw
.mac
.type
< e1000_82543
||
3634 /* Ignore Checksum bit is set */
3635 (rx_desc
->status
& E1000_RXD_STAT_IXSM
))
3638 if ((rx_desc
->status
& E1000_RXD_STAT_IPCS
) &&
3639 !(rx_desc
->errors
& E1000_RXD_ERR_IPE
)) {
3640 /* IP Checksum Good */
3641 mp
->m_pkthdr
.csum_flags
|= CSUM_IP_CHECKED
| CSUM_IP_VALID
;
3644 if ((rx_desc
->status
& E1000_RXD_STAT_TCPCS
) &&
3645 !(rx_desc
->errors
& E1000_RXD_ERR_TCPE
)) {
3646 mp
->m_pkthdr
.csum_flags
|= CSUM_DATA_VALID
|
3648 CSUM_FRAG_NOT_CHECKED
;
3649 mp
->m_pkthdr
.csum_data
= htons(0xffff);
3654 em_enable_intr(struct adapter
*adapter
)
3656 uint32_t ims_mask
= IMS_ENABLE_MASK
;
3658 lwkt_serialize_handler_enable(adapter
->arpcom
.ac_if
.if_serializer
);
3662 if (adapter
->hw
.mac
.type
== e1000_82574
) {
3663 E1000_WRITE_REG(&adapter
->hw
, EM_EIAC
, EM_MSIX_MASK
);
3664 ims_mask
|= EM_MSIX_MASK
;
3667 E1000_WRITE_REG(&adapter
->hw
, E1000_IMS
, ims_mask
);
3671 em_disable_intr(struct adapter
*adapter
)
3673 uint32_t clear
= 0xffffffff;
3676 * The first version of 82542 had an errata where when link was forced
3677 * it would stay up even up even if the cable was disconnected.
3678 * Sequence errors were used to detect the disconnect and then the
3679 * driver would unforce the link. This code in the in the ISR. For
3680 * this to work correctly the Sequence error interrupt had to be
3681 * enabled all the time.
3683 if (adapter
->hw
.mac
.type
== e1000_82542
&&
3684 adapter
->hw
.revision_id
== E1000_REVISION_2
)
3685 clear
&= ~E1000_ICR_RXSEQ
;
3686 else if (adapter
->hw
.mac
.type
== e1000_82574
)
3687 E1000_WRITE_REG(&adapter
->hw
, EM_EIAC
, 0);
3689 E1000_WRITE_REG(&adapter
->hw
, E1000_IMC
, clear
);
3691 adapter
->npoll
.ifpc_stcount
= 0;
3693 lwkt_serialize_handler_disable(adapter
->arpcom
.ac_if
.if_serializer
);
3697 * Bit of a misnomer, what this really means is
3698 * to enable OS management of the system... aka
3699 * to disable special hardware management features
3702 em_get_mgmt(struct adapter
*adapter
)
3704 /* A shared code workaround */
3705 #define E1000_82542_MANC2H E1000_MANC2H
3706 if (adapter
->flags
& EM_FLAG_HAS_MGMT
) {
3707 int manc2h
= E1000_READ_REG(&adapter
->hw
, E1000_MANC2H
);
3708 int manc
= E1000_READ_REG(&adapter
->hw
, E1000_MANC
);
3710 /* disable hardware interception of ARP */
3711 manc
&= ~(E1000_MANC_ARP_EN
);
3713 /* enable receiving management packets to the host */
3714 if (adapter
->hw
.mac
.type
>= e1000_82571
) {
3715 manc
|= E1000_MANC_EN_MNG2HOST
;
3716 #define E1000_MNG2HOST_PORT_623 (1 << 5)
3717 #define E1000_MNG2HOST_PORT_664 (1 << 6)
3718 manc2h
|= E1000_MNG2HOST_PORT_623
;
3719 manc2h
|= E1000_MNG2HOST_PORT_664
;
3720 E1000_WRITE_REG(&adapter
->hw
, E1000_MANC2H
, manc2h
);
3723 E1000_WRITE_REG(&adapter
->hw
, E1000_MANC
, manc
);
3728 * Give control back to hardware management
3729 * controller if there is one.
3732 em_rel_mgmt(struct adapter
*adapter
)
3734 if (adapter
->flags
& EM_FLAG_HAS_MGMT
) {
3735 int manc
= E1000_READ_REG(&adapter
->hw
, E1000_MANC
);
3737 /* re-enable hardware interception of ARP */
3738 manc
|= E1000_MANC_ARP_EN
;
3740 if (adapter
->hw
.mac
.type
>= e1000_82571
)
3741 manc
&= ~E1000_MANC_EN_MNG2HOST
;
3743 E1000_WRITE_REG(&adapter
->hw
, E1000_MANC
, manc
);
3748 * em_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3749 * For ASF and Pass Through versions of f/w this means that
3750 * the driver is loaded. For AMT version (only with 82573)
3751 * of the f/w this means that the network i/f is open.
3754 em_get_hw_control(struct adapter
*adapter
)
3756 /* Let firmware know the driver has taken over */
3757 if (adapter
->hw
.mac
.type
== e1000_82573
) {
3760 swsm
= E1000_READ_REG(&adapter
->hw
, E1000_SWSM
);
3761 E1000_WRITE_REG(&adapter
->hw
, E1000_SWSM
,
3762 swsm
| E1000_SWSM_DRV_LOAD
);
3766 ctrl_ext
= E1000_READ_REG(&adapter
->hw
, E1000_CTRL_EXT
);
3767 E1000_WRITE_REG(&adapter
->hw
, E1000_CTRL_EXT
,
3768 ctrl_ext
| E1000_CTRL_EXT_DRV_LOAD
);
3770 adapter
->flags
|= EM_FLAG_HW_CTRL
;
3774 * em_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3775 * For ASF and Pass Through versions of f/w this means that the
3776 * driver is no longer loaded. For AMT version (only with 82573)
3777 * of the f/w this means that the network i/f is closed.
3780 em_rel_hw_control(struct adapter
*adapter
)
3782 if ((adapter
->flags
& EM_FLAG_HW_CTRL
) == 0)
3784 adapter
->flags
&= ~EM_FLAG_HW_CTRL
;
3786 /* Let firmware taken over control of h/w */
3787 if (adapter
->hw
.mac
.type
== e1000_82573
) {
3790 swsm
= E1000_READ_REG(&adapter
->hw
, E1000_SWSM
);
3791 E1000_WRITE_REG(&adapter
->hw
, E1000_SWSM
,
3792 swsm
& ~E1000_SWSM_DRV_LOAD
);
3796 ctrl_ext
= E1000_READ_REG(&adapter
->hw
, E1000_CTRL_EXT
);
3797 E1000_WRITE_REG(&adapter
->hw
, E1000_CTRL_EXT
,
3798 ctrl_ext
& ~E1000_CTRL_EXT_DRV_LOAD
);
/*
 * Return non-zero when @addr is a usable unicast Ethernet station
 * address: reject multicast/broadcast (low bit of first octet set)
 * and the all-zero address.
 */
static int
em_is_valid_eaddr(const uint8_t *addr)
{
	char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
		return (0);
	return (1);
}
3814 * Enable PCI Wake On Lan capability
3817 em_enable_wol(device_t dev
)
3819 uint16_t cap
, status
;
3822 /* First find the capabilities pointer*/
3823 cap
= pci_read_config(dev
, PCIR_CAP_PTR
, 2);
3825 /* Read the PM Capabilities */
3826 id
= pci_read_config(dev
, cap
, 1);
3827 if (id
!= PCIY_PMG
) /* Something wrong */
3831 * OK, we have the power capabilities,
3832 * so now get the status register
3834 cap
+= PCIR_POWER_STATUS
;
3835 status
= pci_read_config(dev
, cap
, 2);
3836 status
|= PCIM_PSTAT_PME
| PCIM_PSTAT_PMEENABLE
;
3837 pci_write_config(dev
, cap
, status
, 2);
3842 * 82544 Coexistence issue workaround.
3843 * There are 2 issues.
3844 * 1. Transmit Hang issue.
3845 * To detect this issue, following equation can be used...
3846 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3847 * If SUM[3:0] is in between 1 to 4, we will have this issue.
3850 * To detect this issue, following equation can be used...
3851 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3852 * If SUM[3:0] is in between 9 to c, we will have this issue.
3855 * Make sure we do not have ending address
3856 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
3859 em_82544_fill_desc(bus_addr_t address
, uint32_t length
, PDESC_ARRAY desc_array
)
3861 uint32_t safe_terminator
;
3864 * Since issue is sensitive to length and address.
3865 * Let us first check the address...
3868 desc_array
->descriptor
[0].address
= address
;
3869 desc_array
->descriptor
[0].length
= length
;
3870 desc_array
->elements
= 1;
3871 return (desc_array
->elements
);
3875 (uint32_t)((((uint32_t)address
& 0x7) + (length
& 0xF)) & 0xF);
3877 /* If it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
3878 if (safe_terminator
== 0 ||
3879 (safe_terminator
> 4 && safe_terminator
< 9) ||
3880 (safe_terminator
> 0xC && safe_terminator
<= 0xF)) {
3881 desc_array
->descriptor
[0].address
= address
;
3882 desc_array
->descriptor
[0].length
= length
;
3883 desc_array
->elements
= 1;
3884 return (desc_array
->elements
);
3887 desc_array
->descriptor
[0].address
= address
;
3888 desc_array
->descriptor
[0].length
= length
- 4;
3889 desc_array
->descriptor
[1].address
= address
+ (length
- 4);
3890 desc_array
->descriptor
[1].length
= 4;
3891 desc_array
->elements
= 2;
3892 return (desc_array
->elements
);
3896 em_update_stats(struct adapter
*adapter
)
3898 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
3900 if (adapter
->hw
.phy
.media_type
== e1000_media_type_copper
||
3901 (E1000_READ_REG(&adapter
->hw
, E1000_STATUS
) & E1000_STATUS_LU
)) {
3902 adapter
->stats
.symerrs
+=
3903 E1000_READ_REG(&adapter
->hw
, E1000_SYMERRS
);
3904 adapter
->stats
.sec
+= E1000_READ_REG(&adapter
->hw
, E1000_SEC
);
3906 adapter
->stats
.crcerrs
+= E1000_READ_REG(&adapter
->hw
, E1000_CRCERRS
);
3907 adapter
->stats
.mpc
+= E1000_READ_REG(&adapter
->hw
, E1000_MPC
);
3908 adapter
->stats
.scc
+= E1000_READ_REG(&adapter
->hw
, E1000_SCC
);
3909 adapter
->stats
.ecol
+= E1000_READ_REG(&adapter
->hw
, E1000_ECOL
);
3911 adapter
->stats
.mcc
+= E1000_READ_REG(&adapter
->hw
, E1000_MCC
);
3912 adapter
->stats
.latecol
+= E1000_READ_REG(&adapter
->hw
, E1000_LATECOL
);
3913 adapter
->stats
.colc
+= E1000_READ_REG(&adapter
->hw
, E1000_COLC
);
3914 adapter
->stats
.dc
+= E1000_READ_REG(&adapter
->hw
, E1000_DC
);
3915 adapter
->stats
.rlec
+= E1000_READ_REG(&adapter
->hw
, E1000_RLEC
);
3916 adapter
->stats
.xonrxc
+= E1000_READ_REG(&adapter
->hw
, E1000_XONRXC
);
3917 adapter
->stats
.xontxc
+= E1000_READ_REG(&adapter
->hw
, E1000_XONTXC
);
3918 adapter
->stats
.xoffrxc
+= E1000_READ_REG(&adapter
->hw
, E1000_XOFFRXC
);
3919 adapter
->stats
.xofftxc
+= E1000_READ_REG(&adapter
->hw
, E1000_XOFFTXC
);
3920 adapter
->stats
.fcruc
+= E1000_READ_REG(&adapter
->hw
, E1000_FCRUC
);
3921 adapter
->stats
.prc64
+= E1000_READ_REG(&adapter
->hw
, E1000_PRC64
);
3922 adapter
->stats
.prc127
+= E1000_READ_REG(&adapter
->hw
, E1000_PRC127
);
3923 adapter
->stats
.prc255
+= E1000_READ_REG(&adapter
->hw
, E1000_PRC255
);
3924 adapter
->stats
.prc511
+= E1000_READ_REG(&adapter
->hw
, E1000_PRC511
);
3925 adapter
->stats
.prc1023
+= E1000_READ_REG(&adapter
->hw
, E1000_PRC1023
);
3926 adapter
->stats
.prc1522
+= E1000_READ_REG(&adapter
->hw
, E1000_PRC1522
);
3927 adapter
->stats
.gprc
+= E1000_READ_REG(&adapter
->hw
, E1000_GPRC
);
3928 adapter
->stats
.bprc
+= E1000_READ_REG(&adapter
->hw
, E1000_BPRC
);
3929 adapter
->stats
.mprc
+= E1000_READ_REG(&adapter
->hw
, E1000_MPRC
);
3930 adapter
->stats
.gptc
+= E1000_READ_REG(&adapter
->hw
, E1000_GPTC
);
3932 /* For the 64-bit byte counters the low dword must be read first. */
3933 /* Both registers clear on the read of the high dword */
3935 adapter
->stats
.gorc
+= E1000_READ_REG(&adapter
->hw
, E1000_GORCH
);
3936 adapter
->stats
.gotc
+= E1000_READ_REG(&adapter
->hw
, E1000_GOTCH
);
3938 adapter
->stats
.rnbc
+= E1000_READ_REG(&adapter
->hw
, E1000_RNBC
);
3939 adapter
->stats
.ruc
+= E1000_READ_REG(&adapter
->hw
, E1000_RUC
);
3940 adapter
->stats
.rfc
+= E1000_READ_REG(&adapter
->hw
, E1000_RFC
);
3941 adapter
->stats
.roc
+= E1000_READ_REG(&adapter
->hw
, E1000_ROC
);
3942 adapter
->stats
.rjc
+= E1000_READ_REG(&adapter
->hw
, E1000_RJC
);
3944 adapter
->stats
.tor
+= E1000_READ_REG(&adapter
->hw
, E1000_TORH
);
3945 adapter
->stats
.tot
+= E1000_READ_REG(&adapter
->hw
, E1000_TOTH
);
3947 adapter
->stats
.tpr
+= E1000_READ_REG(&adapter
->hw
, E1000_TPR
);
3948 adapter
->stats
.tpt
+= E1000_READ_REG(&adapter
->hw
, E1000_TPT
);
3949 adapter
->stats
.ptc64
+= E1000_READ_REG(&adapter
->hw
, E1000_PTC64
);
3950 adapter
->stats
.ptc127
+= E1000_READ_REG(&adapter
->hw
, E1000_PTC127
);
3951 adapter
->stats
.ptc255
+= E1000_READ_REG(&adapter
->hw
, E1000_PTC255
);
3952 adapter
->stats
.ptc511
+= E1000_READ_REG(&adapter
->hw
, E1000_PTC511
);
3953 adapter
->stats
.ptc1023
+= E1000_READ_REG(&adapter
->hw
, E1000_PTC1023
);
3954 adapter
->stats
.ptc1522
+= E1000_READ_REG(&adapter
->hw
, E1000_PTC1522
);
3955 adapter
->stats
.mptc
+= E1000_READ_REG(&adapter
->hw
, E1000_MPTC
);
3956 adapter
->stats
.bptc
+= E1000_READ_REG(&adapter
->hw
, E1000_BPTC
);
3958 if (adapter
->hw
.mac
.type
>= e1000_82543
) {
3959 adapter
->stats
.algnerrc
+=
3960 E1000_READ_REG(&adapter
->hw
, E1000_ALGNERRC
);
3961 adapter
->stats
.rxerrc
+=
3962 E1000_READ_REG(&adapter
->hw
, E1000_RXERRC
);
3963 adapter
->stats
.tncrs
+=
3964 E1000_READ_REG(&adapter
->hw
, E1000_TNCRS
);
3965 adapter
->stats
.cexterr
+=
3966 E1000_READ_REG(&adapter
->hw
, E1000_CEXTERR
);
3967 adapter
->stats
.tsctc
+=
3968 E1000_READ_REG(&adapter
->hw
, E1000_TSCTC
);
3969 adapter
->stats
.tsctfc
+=
3970 E1000_READ_REG(&adapter
->hw
, E1000_TSCTFC
);
3973 IFNET_STAT_SET(ifp
, collisions
, adapter
->stats
.colc
);
3976 IFNET_STAT_SET(ifp
, ierrors
,
3977 adapter
->dropped_pkts
+ adapter
->stats
.rxerrc
+
3978 adapter
->stats
.crcerrs
+ adapter
->stats
.algnerrc
+
3979 adapter
->stats
.ruc
+ adapter
->stats
.roc
+
3980 adapter
->stats
.mpc
+ adapter
->stats
.cexterr
);
3983 IFNET_STAT_SET(ifp
, oerrors
,
3984 adapter
->stats
.ecol
+ adapter
->stats
.latecol
+
3985 adapter
->watchdog_events
);
3989 em_print_debug_info(struct adapter
*adapter
)
3991 device_t dev
= adapter
->dev
;
3992 uint8_t *hw_addr
= adapter
->hw
.hw_addr
;
3994 device_printf(dev
, "Adapter hardware address = %p \n", hw_addr
);
3995 device_printf(dev
, "CTRL = 0x%x RCTL = 0x%x \n",
3996 E1000_READ_REG(&adapter
->hw
, E1000_CTRL
),
3997 E1000_READ_REG(&adapter
->hw
, E1000_RCTL
));
3998 device_printf(dev
, "Packet buffer = Tx=%dk Rx=%dk \n",
3999 ((E1000_READ_REG(&adapter
->hw
, E1000_PBA
) & 0xffff0000) >> 16),\
4000 (E1000_READ_REG(&adapter
->hw
, E1000_PBA
) & 0xffff) );
4001 device_printf(dev
, "Flow control watermarks high = %d low = %d\n",
4002 adapter
->hw
.fc
.high_water
,
4003 adapter
->hw
.fc
.low_water
);
4004 device_printf(dev
, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
4005 E1000_READ_REG(&adapter
->hw
, E1000_TIDV
),
4006 E1000_READ_REG(&adapter
->hw
, E1000_TADV
));
4007 device_printf(dev
, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
4008 E1000_READ_REG(&adapter
->hw
, E1000_RDTR
),
4009 E1000_READ_REG(&adapter
->hw
, E1000_RADV
));
4010 device_printf(dev
, "fifo workaround = %lld, fifo_reset_count = %lld\n",
4011 (long long)adapter
->tx_fifo_wrk_cnt
,
4012 (long long)adapter
->tx_fifo_reset_cnt
);
4013 device_printf(dev
, "hw tdh = %d, hw tdt = %d\n",
4014 E1000_READ_REG(&adapter
->hw
, E1000_TDH(0)),
4015 E1000_READ_REG(&adapter
->hw
, E1000_TDT(0)));
4016 device_printf(dev
, "hw rdh = %d, hw rdt = %d\n",
4017 E1000_READ_REG(&adapter
->hw
, E1000_RDH(0)),
4018 E1000_READ_REG(&adapter
->hw
, E1000_RDT(0)));
4019 device_printf(dev
, "Num Tx descriptors avail = %d\n",
4020 adapter
->num_tx_desc_avail
);
4021 device_printf(dev
, "Tx Descriptors not avail1 = %ld\n",
4022 adapter
->no_tx_desc_avail1
);
4023 device_printf(dev
, "Tx Descriptors not avail2 = %ld\n",
4024 adapter
->no_tx_desc_avail2
);
4025 device_printf(dev
, "Std mbuf failed = %ld\n",
4026 adapter
->mbuf_alloc_failed
);
4027 device_printf(dev
, "Std mbuf cluster failed = %ld\n",
4028 adapter
->mbuf_cluster_failed
);
4029 device_printf(dev
, "Driver dropped packets = %ld\n",
4030 adapter
->dropped_pkts
);
4031 device_printf(dev
, "Driver tx dma failure in encap = %ld\n",
4032 adapter
->no_tx_dma_setup
);
4036 em_print_hw_stats(struct adapter
*adapter
)
4038 device_t dev
= adapter
->dev
;
4040 device_printf(dev
, "Excessive collisions = %lld\n",
4041 (long long)adapter
->stats
.ecol
);
4042 #if (DEBUG_HW > 0) /* Dont output these errors normally */
4043 device_printf(dev
, "Symbol errors = %lld\n",
4044 (long long)adapter
->stats
.symerrs
);
4046 device_printf(dev
, "Sequence errors = %lld\n",
4047 (long long)adapter
->stats
.sec
);
4048 device_printf(dev
, "Defer count = %lld\n",
4049 (long long)adapter
->stats
.dc
);
4050 device_printf(dev
, "Missed Packets = %lld\n",
4051 (long long)adapter
->stats
.mpc
);
4052 device_printf(dev
, "Receive No Buffers = %lld\n",
4053 (long long)adapter
->stats
.rnbc
);
4054 /* RLEC is inaccurate on some hardware, calculate our own. */
4055 device_printf(dev
, "Receive Length Errors = %lld\n",
4056 ((long long)adapter
->stats
.roc
+ (long long)adapter
->stats
.ruc
));
4057 device_printf(dev
, "Receive errors = %lld\n",
4058 (long long)adapter
->stats
.rxerrc
);
4059 device_printf(dev
, "Crc errors = %lld\n",
4060 (long long)adapter
->stats
.crcerrs
);
4061 device_printf(dev
, "Alignment errors = %lld\n",
4062 (long long)adapter
->stats
.algnerrc
);
4063 device_printf(dev
, "Collision/Carrier extension errors = %lld\n",
4064 (long long)adapter
->stats
.cexterr
);
4065 device_printf(dev
, "RX overruns = %ld\n", adapter
->rx_overruns
);
4066 device_printf(dev
, "watchdog timeouts = %ld\n",
4067 adapter
->watchdog_events
);
4068 device_printf(dev
, "XON Rcvd = %lld\n",
4069 (long long)adapter
->stats
.xonrxc
);
4070 device_printf(dev
, "XON Xmtd = %lld\n",
4071 (long long)adapter
->stats
.xontxc
);
4072 device_printf(dev
, "XOFF Rcvd = %lld\n",
4073 (long long)adapter
->stats
.xoffrxc
);
4074 device_printf(dev
, "XOFF Xmtd = %lld\n",
4075 (long long)adapter
->stats
.xofftxc
);
4076 device_printf(dev
, "Good Packets Rcvd = %lld\n",
4077 (long long)adapter
->stats
.gprc
);
4078 device_printf(dev
, "Good Packets Xmtd = %lld\n",
4079 (long long)adapter
->stats
.gptc
);
4083 em_print_nvm_info(struct adapter
*adapter
)
4085 uint16_t eeprom_data
;
4088 /* Its a bit crude, but it gets the job done */
4089 kprintf("\nInterface EEPROM Dump:\n");
4090 kprintf("Offset\n0x0000 ");
4091 for (i
= 0, j
= 0; i
< 32; i
++, j
++) {
4092 if (j
== 8) { /* Make the offset block */
4094 kprintf("\n0x00%x0 ",row
);
4096 e1000_read_nvm(&adapter
->hw
, i
, 1, &eeprom_data
);
4097 kprintf("%04x ", eeprom_data
);
4103 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS
)
4105 struct adapter
*adapter
;
4110 error
= sysctl_handle_int(oidp
, &result
, 0, req
);
4111 if (error
|| !req
->newptr
)
4114 adapter
= (struct adapter
*)arg1
;
4115 ifp
= &adapter
->arpcom
.ac_if
;
4117 lwkt_serialize_enter(ifp
->if_serializer
);
4120 em_print_debug_info(adapter
);
4123 * This value will cause a hex dump of the
4124 * first 32 16-bit words of the EEPROM to
4128 em_print_nvm_info(adapter
);
4130 lwkt_serialize_exit(ifp
->if_serializer
);
4136 em_sysctl_stats(SYSCTL_HANDLER_ARGS
)
4141 error
= sysctl_handle_int(oidp
, &result
, 0, req
);
4142 if (error
|| !req
->newptr
)
4146 struct adapter
*adapter
= (struct adapter
*)arg1
;
4147 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
4149 lwkt_serialize_enter(ifp
->if_serializer
);
4150 em_print_hw_stats(adapter
);
4151 lwkt_serialize_exit(ifp
->if_serializer
);
4157 em_add_sysctl(struct adapter
*adapter
)
4159 struct sysctl_ctx_list
*ctx
;
4160 struct sysctl_oid
*tree
;
4162 ctx
= device_get_sysctl_ctx(adapter
->dev
);
4163 tree
= device_get_sysctl_tree(adapter
->dev
);
4164 SYSCTL_ADD_PROC(ctx
, SYSCTL_CHILDREN(tree
),
4165 OID_AUTO
, "debug", CTLTYPE_INT
|CTLFLAG_RW
, adapter
, 0,
4166 em_sysctl_debug_info
, "I", "Debug Information");
4168 SYSCTL_ADD_PROC(ctx
, SYSCTL_CHILDREN(tree
),
4169 OID_AUTO
, "stats", CTLTYPE_INT
|CTLFLAG_RW
, adapter
, 0,
4170 em_sysctl_stats
, "I", "Statistics");
4172 SYSCTL_ADD_INT(ctx
, SYSCTL_CHILDREN(tree
),
4173 OID_AUTO
, "rxd", CTLFLAG_RD
,
4174 &adapter
->num_rx_desc
, 0, NULL
);
4175 SYSCTL_ADD_INT(ctx
, SYSCTL_CHILDREN(tree
),
4176 OID_AUTO
, "txd", CTLFLAG_RD
,
4177 &adapter
->num_tx_desc
, 0, NULL
);
4179 if (adapter
->hw
.mac
.type
>= e1000_82540
) {
4180 SYSCTL_ADD_PROC(ctx
, SYSCTL_CHILDREN(tree
),
4181 OID_AUTO
, "int_throttle_ceil",
4182 CTLTYPE_INT
|CTLFLAG_RW
, adapter
, 0,
4183 em_sysctl_int_throttle
, "I",
4184 "interrupt throttling rate");
4186 SYSCTL_ADD_PROC(ctx
, SYSCTL_CHILDREN(tree
),
4187 OID_AUTO
, "int_tx_nsegs",
4188 CTLTYPE_INT
|CTLFLAG_RW
, adapter
, 0,
4189 em_sysctl_int_tx_nsegs
, "I",
4190 "# segments per TX interrupt");
4191 SYSCTL_ADD_INT(ctx
, SYSCTL_CHILDREN(tree
),
4192 OID_AUTO
, "wreg_tx_nsegs", CTLFLAG_RW
,
4193 &adapter
->tx_wreg_nsegs
, 0,
4194 "# segments before write to hardware register");
4198 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS
)
4200 struct adapter
*adapter
= (void *)arg1
;
4201 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
4202 int error
, throttle
;
4204 throttle
= adapter
->int_throttle_ceil
;
4205 error
= sysctl_handle_int(oidp
, &throttle
, 0, req
);
4206 if (error
|| req
->newptr
== NULL
)
4208 if (throttle
< 0 || throttle
> 1000000000 / 256)
4213 * Set the interrupt throttling rate in 256ns increments,
4214 * recalculate sysctl value assignment to get exact frequency.
4216 throttle
= 1000000000 / 256 / throttle
;
4218 /* Upper 16bits of ITR is reserved and should be zero */
4219 if (throttle
& 0xffff0000)
4223 lwkt_serialize_enter(ifp
->if_serializer
);
4226 adapter
->int_throttle_ceil
= 1000000000 / 256 / throttle
;
4228 adapter
->int_throttle_ceil
= 0;
4230 if (ifp
->if_flags
& IFF_RUNNING
)
4231 em_set_itr(adapter
, throttle
);
4233 lwkt_serialize_exit(ifp
->if_serializer
);
4236 if_printf(ifp
, "Interrupt moderation set to %d/sec\n",
4237 adapter
->int_throttle_ceil
);
4243 em_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS
)
4245 struct adapter
*adapter
= (void *)arg1
;
4246 struct ifnet
*ifp
= &adapter
->arpcom
.ac_if
;
4249 segs
= adapter
->tx_int_nsegs
;
4250 error
= sysctl_handle_int(oidp
, &segs
, 0, req
);
4251 if (error
|| req
->newptr
== NULL
)
4256 lwkt_serialize_enter(ifp
->if_serializer
);
4259 * Don't allow int_tx_nsegs to become:
4260 * o Less the oact_tx_desc
4261 * o Too large that no TX desc will cause TX interrupt to
4262 * be generated (OACTIVE will never recover)
4263 * o Too small that will cause tx_dd[] overflow
4265 if (segs
< adapter
->oact_tx_desc
||
4266 segs
>= adapter
->num_tx_desc
- adapter
->oact_tx_desc
||
4267 segs
< adapter
->num_tx_desc
/ EM_TXDD_SAFE
) {
4271 adapter
->tx_int_nsegs
= segs
;
4274 lwkt_serialize_exit(ifp
->if_serializer
);
4280 em_set_itr(struct adapter
*adapter
, uint32_t itr
)
4282 E1000_WRITE_REG(&adapter
->hw
, E1000_ITR
, itr
);
4283 if (adapter
->hw
.mac
.type
== e1000_82574
) {
4287 * When using MSIX interrupts we need to
4288 * throttle using the EITR register
4290 for (i
= 0; i
< 4; ++i
) {
4291 E1000_WRITE_REG(&adapter
->hw
,
4292 E1000_EITR_82574(i
), itr
);
4298 em_disable_aspm(struct adapter
*adapter
)
4300 uint16_t link_cap
, link_ctrl
, disable
;
4301 uint8_t pcie_ptr
, reg
;
4302 device_t dev
= adapter
->dev
;
4304 switch (adapter
->hw
.mac
.type
) {
4309 * 82573 specification update
4310 * errata #8 disable L0s
4311 * errata #41 disable L1
4313 * 82571/82572 specification update
4314 # errata #13 disable L1
4315 * errata #68 disable L0s
4317 disable
= PCIEM_LNKCTL_ASPM_L0S
| PCIEM_LNKCTL_ASPM_L1
;
4323 * 82574 specification update errata #20
4324 * 82583 specification update errata #9
4326 * There is no need to disable L1
4328 disable
= PCIEM_LNKCTL_ASPM_L0S
;
4335 pcie_ptr
= pci_get_pciecap_ptr(dev
);
4339 link_cap
= pci_read_config(dev
, pcie_ptr
+ PCIER_LINKCAP
, 2);
4340 if ((link_cap
& PCIEM_LNKCAP_ASPM_MASK
) == 0)
4344 if_printf(&adapter
->arpcom
.ac_if
,
4345 "disable ASPM %#02x\n", disable
);
4348 reg
= pcie_ptr
+ PCIER_LINKCTRL
;
4349 link_ctrl
= pci_read_config(dev
, reg
, 2);
4350 link_ctrl
&= ~disable
;
4351 pci_write_config(dev
, reg
, link_ctrl
, 2);
4355 em_tso_pullup(struct adapter
*adapter
, struct mbuf
**mp
)
4357 int iphlen
, hoff
, thoff
, ex
= 0;
4362 KASSERT(M_WRITABLE(m
), ("TSO mbuf not writable"));
4364 iphlen
= m
->m_pkthdr
.csum_iphlen
;
4365 thoff
= m
->m_pkthdr
.csum_thlen
;
4366 hoff
= m
->m_pkthdr
.csum_lhlen
;
4368 KASSERT(iphlen
> 0, ("invalid ip hlen"));
4369 KASSERT(thoff
> 0, ("invalid tcp hlen"));
4370 KASSERT(hoff
> 0, ("invalid ether hlen"));
4372 if (adapter
->flags
& EM_FLAG_TSO_PULLEX
)
4375 if (m
->m_len
< hoff
+ iphlen
+ thoff
+ ex
) {
4376 m
= m_pullup(m
, hoff
+ iphlen
+ thoff
+ ex
);
4383 ip
= mtodoff(m
, struct ip
*, hoff
);
4390 em_tso_setup(struct adapter
*adapter
, struct mbuf
*mp
,
4391 uint32_t *txd_upper
, uint32_t *txd_lower
)
4393 struct e1000_context_desc
*TXD
;
4394 int hoff
, iphlen
, thoff
, hlen
;
4395 int mss
, pktlen
, curr_txd
;
4397 iphlen
= mp
->m_pkthdr
.csum_iphlen
;
4398 thoff
= mp
->m_pkthdr
.csum_thlen
;
4399 hoff
= mp
->m_pkthdr
.csum_lhlen
;
4400 mss
= mp
->m_pkthdr
.tso_segsz
;
4401 pktlen
= mp
->m_pkthdr
.len
;
4403 if (adapter
->csum_flags
== CSUM_TSO
&&
4404 adapter
->csum_iphlen
== iphlen
&&
4405 adapter
->csum_lhlen
== hoff
&&
4406 adapter
->csum_thlen
== thoff
&&
4407 adapter
->csum_mss
== mss
&&
4408 adapter
->csum_pktlen
== pktlen
) {
4409 *txd_upper
= adapter
->csum_txd_upper
;
4410 *txd_lower
= adapter
->csum_txd_lower
;
4413 hlen
= hoff
+ iphlen
+ thoff
;
4416 * Setup a new TSO context.
4419 curr_txd
= adapter
->next_avail_tx_desc
;
4420 TXD
= (struct e1000_context_desc
*)&adapter
->tx_desc_base
[curr_txd
];
4422 *txd_lower
= E1000_TXD_CMD_DEXT
| /* Extended descr type */
4423 E1000_TXD_DTYP_D
| /* Data descr type */
4424 E1000_TXD_CMD_TSE
; /* Do TSE on this packet */
4426 /* IP and/or TCP header checksum calculation and insertion. */
4427 *txd_upper
= (E1000_TXD_POPTS_IXSM
| E1000_TXD_POPTS_TXSM
) << 8;
4430 * Start offset for header checksum calculation.
4431 * End offset for header checksum calculation.
4432 * Offset of place put the checksum.
4434 TXD
->lower_setup
.ip_fields
.ipcss
= hoff
;
4435 TXD
->lower_setup
.ip_fields
.ipcse
= htole16(hoff
+ iphlen
- 1);
4436 TXD
->lower_setup
.ip_fields
.ipcso
= hoff
+ offsetof(struct ip
, ip_sum
);
4439 * Start offset for payload checksum calculation.
4440 * End offset for payload checksum calculation.
4441 * Offset of place to put the checksum.
4443 TXD
->upper_setup
.tcp_fields
.tucss
= hoff
+ iphlen
;
4444 TXD
->upper_setup
.tcp_fields
.tucse
= 0;
4445 TXD
->upper_setup
.tcp_fields
.tucso
=
4446 hoff
+ iphlen
+ offsetof(struct tcphdr
, th_sum
);
4449 * Payload size per packet w/o any headers.
4450 * Length of all headers up to payload.
4452 TXD
->tcp_seg_setup
.fields
.mss
= htole16(mss
);
4453 TXD
->tcp_seg_setup
.fields
.hdr_len
= hlen
;
4454 TXD
->cmd_and_length
= htole32(E1000_TXD_CMD_IFCS
|
4455 E1000_TXD_CMD_DEXT
| /* Extended descr */
4456 E1000_TXD_CMD_TSE
| /* TSE context */
4457 E1000_TXD_CMD_IP
| /* Do IP csum */
4458 E1000_TXD_CMD_TCP
| /* Do TCP checksum */
4459 (pktlen
- hlen
)); /* Total len */
4461 /* Save the information for this TSO context */
4462 adapter
->csum_flags
= CSUM_TSO
;
4463 adapter
->csum_lhlen
= hoff
;
4464 adapter
->csum_iphlen
= iphlen
;
4465 adapter
->csum_thlen
= thoff
;
4466 adapter
->csum_mss
= mss
;
4467 adapter
->csum_pktlen
= pktlen
;
4468 adapter
->csum_txd_upper
= *txd_upper
;
4469 adapter
->csum_txd_lower
= *txd_lower
;
4471 if (++curr_txd
== adapter
->num_tx_desc
)
4474 KKASSERT(adapter
->num_tx_desc_avail
> 0);
4475 adapter
->num_tx_desc_avail
--;
4477 adapter
->next_avail_tx_desc
= curr_txd
;