2 * Copyright (c) 2001-2013, Intel Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the Intel Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
32 #include "opt_ifpoll.h"
35 #include <sys/param.h>
37 #include <sys/endian.h>
38 #include <sys/interrupt.h>
39 #include <sys/kernel.h>
40 #include <sys/malloc.h>
44 #include <sys/serialize.h>
45 #include <sys/serialize2.h>
46 #include <sys/socket.h>
47 #include <sys/sockio.h>
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
52 #include <net/ethernet.h>
54 #include <net/if_arp.h>
55 #include <net/if_dl.h>
56 #include <net/if_media.h>
57 #include <net/ifq_var.h>
58 #include <net/if_ringmap.h>
59 #include <net/toeplitz.h>
60 #include <net/toeplitz2.h>
61 #include <net/vlan/if_vlan_var.h>
62 #include <net/vlan/if_vlan_ether.h>
63 #include <net/if_poll.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/in.h>
67 #include <netinet/ip.h>
69 #include <bus/pci/pcivar.h>
70 #include <bus/pci/pcireg.h>
72 #include <dev/netif/ig_hal/e1000_api.h>
73 #include <dev/netif/ig_hal/e1000_82575.h>
74 #include <dev/netif/ig_hal/e1000_dragonfly.h>
75 #include <dev/netif/igb/if_igb.h>
78 #define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) \
80 if (sc->rss_debug >= lvl) \
81 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
83 #else /* !IGB_RSS_DEBUG */
84 #define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0)
85 #endif /* IGB_RSS_DEBUG */
87 #define IGB_NAME "Intel(R) PRO/1000 "
88 #define IGB_DEVICE(id) \
89 { IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id }
90 #define IGB_DEVICE_NULL { 0, 0, NULL }
92 static struct igb_device
{
97 IGB_DEVICE(82575EB_COPPER
),
98 IGB_DEVICE(82575EB_FIBER_SERDES
),
99 IGB_DEVICE(82575GB_QUAD_COPPER
),
101 IGB_DEVICE(82576_NS
),
102 IGB_DEVICE(82576_NS_SERDES
),
103 IGB_DEVICE(82576_FIBER
),
104 IGB_DEVICE(82576_SERDES
),
105 IGB_DEVICE(82576_SERDES_QUAD
),
106 IGB_DEVICE(82576_QUAD_COPPER
),
107 IGB_DEVICE(82576_QUAD_COPPER_ET2
),
108 IGB_DEVICE(82576_VF
),
109 IGB_DEVICE(82580_COPPER
),
110 IGB_DEVICE(82580_FIBER
),
111 IGB_DEVICE(82580_SERDES
),
112 IGB_DEVICE(82580_SGMII
),
113 IGB_DEVICE(82580_COPPER_DUAL
),
114 IGB_DEVICE(82580_QUAD_FIBER
),
115 IGB_DEVICE(DH89XXCC_SERDES
),
116 IGB_DEVICE(DH89XXCC_SGMII
),
117 IGB_DEVICE(DH89XXCC_SFP
),
118 IGB_DEVICE(DH89XXCC_BACKPLANE
),
119 IGB_DEVICE(I350_COPPER
),
120 IGB_DEVICE(I350_FIBER
),
121 IGB_DEVICE(I350_SERDES
),
122 IGB_DEVICE(I350_SGMII
),
124 IGB_DEVICE(I210_COPPER
),
125 IGB_DEVICE(I210_COPPER_IT
),
126 IGB_DEVICE(I210_COPPER_OEM1
),
127 IGB_DEVICE(I210_COPPER_FLASHLESS
),
128 IGB_DEVICE(I210_SERDES_FLASHLESS
),
129 IGB_DEVICE(I210_FIBER
),
130 IGB_DEVICE(I210_SERDES
),
131 IGB_DEVICE(I210_SGMII
),
132 IGB_DEVICE(I211_COPPER
),
133 IGB_DEVICE(I354_BACKPLANE_1GBPS
),
134 IGB_DEVICE(I354_BACKPLANE_2_5GBPS
),
135 IGB_DEVICE(I354_SGMII
),
137 /* required last entry */
141 static int igb_probe(device_t
);
142 static int igb_attach(device_t
);
143 static int igb_detach(device_t
);
144 static int igb_shutdown(device_t
);
145 static int igb_suspend(device_t
);
146 static int igb_resume(device_t
);
148 static boolean_t
igb_is_valid_ether_addr(const uint8_t *);
149 static void igb_setup_ifp(struct igb_softc
*);
150 static boolean_t
igb_txcsum_ctx(struct igb_tx_ring
*, struct mbuf
*);
151 static int igb_tso_pullup(struct igb_tx_ring
*, struct mbuf
**);
152 static void igb_tso_ctx(struct igb_tx_ring
*, struct mbuf
*, uint32_t *);
153 static void igb_add_sysctl(struct igb_softc
*);
154 static void igb_add_intr_rate_sysctl(struct igb_softc
*, int,
155 const char *, const char *);
156 static int igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS
);
157 static int igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS
);
158 static int igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS
);
159 static int igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS
);
160 static void igb_set_ring_inuse(struct igb_softc
*, boolean_t
);
161 static int igb_get_rxring_inuse(const struct igb_softc
*, boolean_t
);
162 static int igb_get_txring_inuse(const struct igb_softc
*, boolean_t
);
163 static void igb_set_timer_cpuid(struct igb_softc
*, boolean_t
);
165 static void igb_vf_init_stats(struct igb_softc
*);
166 static void igb_reset(struct igb_softc
*, boolean_t
);
167 static void igb_update_stats_counters(struct igb_softc
*);
168 static void igb_update_vf_stats_counters(struct igb_softc
*);
169 static void igb_update_link_status(struct igb_softc
*);
170 static void igb_init_tx_unit(struct igb_softc
*);
171 static void igb_init_rx_unit(struct igb_softc
*, boolean_t
);
172 static void igb_init_dmac(struct igb_softc
*, uint32_t);
173 static void igb_reg_dump(struct igb_softc
*);
174 static int igb_sysctl_reg_dump(SYSCTL_HANDLER_ARGS
);
176 static void igb_set_vlan(struct igb_softc
*);
177 static void igb_set_multi(struct igb_softc
*);
178 static void igb_set_promisc(struct igb_softc
*);
179 static void igb_disable_promisc(struct igb_softc
*);
181 static int igb_get_ring_max(const struct igb_softc
*);
182 static void igb_get_rxring_cnt(const struct igb_softc
*, int *, int *);
183 static void igb_get_txring_cnt(const struct igb_softc
*, int *, int *);
184 static int igb_alloc_rings(struct igb_softc
*);
185 static void igb_free_rings(struct igb_softc
*);
186 static int igb_create_tx_ring(struct igb_tx_ring
*);
187 static int igb_create_rx_ring(struct igb_rx_ring
*);
188 static void igb_free_tx_ring(struct igb_tx_ring
*);
189 static void igb_free_rx_ring(struct igb_rx_ring
*);
190 static void igb_destroy_tx_ring(struct igb_tx_ring
*, int);
191 static void igb_destroy_rx_ring(struct igb_rx_ring
*, int);
192 static void igb_init_tx_ring(struct igb_tx_ring
*);
193 static int igb_init_rx_ring(struct igb_rx_ring
*);
194 static int igb_newbuf(struct igb_rx_ring
*, int, boolean_t
);
195 static int igb_encap(struct igb_tx_ring
*, struct mbuf
**, int *, int *);
196 static void igb_rx_refresh(struct igb_rx_ring
*, int);
197 static void igb_setup_serialize(struct igb_softc
*);
199 static void igb_stop(struct igb_softc
*);
200 static void igb_init(void *);
201 static int igb_ioctl(struct ifnet
*, u_long
, caddr_t
, struct ucred
*);
202 static void igb_media_status(struct ifnet
*, struct ifmediareq
*);
203 static int igb_media_change(struct ifnet
*);
204 static void igb_timer(void *);
205 static void igb_watchdog(struct ifaltq_subque
*);
206 static void igb_start(struct ifnet
*, struct ifaltq_subque
*);
208 static void igb_npoll(struct ifnet
*, struct ifpoll_info
*);
209 static void igb_npoll_rx(struct ifnet
*, void *, int);
210 static void igb_npoll_tx(struct ifnet
*, void *, int);
211 static void igb_npoll_status(struct ifnet
*);
213 static void igb_serialize(struct ifnet
*, enum ifnet_serialize
);
214 static void igb_deserialize(struct ifnet
*, enum ifnet_serialize
);
215 static int igb_tryserialize(struct ifnet
*, enum ifnet_serialize
);
217 static void igb_serialize_assert(struct ifnet
*, enum ifnet_serialize
,
221 static void igb_intr(void *);
222 static void igb_intr_shared(void *);
223 static void igb_rxeof(struct igb_rx_ring
*, int);
224 static void igb_txeof(struct igb_tx_ring
*, int);
225 static void igb_txgc(struct igb_tx_ring
*);
226 static void igb_txgc_timer(void *);
227 static void igb_set_eitr(struct igb_softc
*, int, int);
228 static void igb_enable_intr(struct igb_softc
*);
229 static void igb_disable_intr(struct igb_softc
*);
230 static void igb_init_unshared_intr(struct igb_softc
*);
231 static void igb_init_intr(struct igb_softc
*);
232 static int igb_setup_intr(struct igb_softc
*);
233 static void igb_set_txintr_mask(struct igb_tx_ring
*, int *, int);
234 static void igb_set_rxintr_mask(struct igb_rx_ring
*, int *, int);
235 static void igb_set_intr_mask(struct igb_softc
*);
236 static int igb_alloc_intr(struct igb_softc
*);
237 static void igb_free_intr(struct igb_softc
*);
238 static void igb_teardown_intr(struct igb_softc
*, int);
239 static void igb_alloc_msix(struct igb_softc
*);
240 static void igb_free_msix(struct igb_softc
*, boolean_t
);
241 static void igb_msix_rx(void *);
242 static void igb_msix_tx(void *);
243 static void igb_msix_status(void *);
244 static void igb_msix_rxtx(void *);
246 /* Management and WOL Support */
247 static void igb_get_mgmt(struct igb_softc
*);
248 static void igb_rel_mgmt(struct igb_softc
*);
249 static void igb_get_hw_control(struct igb_softc
*);
250 static void igb_rel_hw_control(struct igb_softc
*);
251 static void igb_enable_wol(struct igb_softc
*);
252 static int igb_enable_phy_wol(struct igb_softc
*);
254 static device_method_t igb_methods
[] = {
255 /* Device interface */
256 DEVMETHOD(device_probe
, igb_probe
),
257 DEVMETHOD(device_attach
, igb_attach
),
258 DEVMETHOD(device_detach
, igb_detach
),
259 DEVMETHOD(device_shutdown
, igb_shutdown
),
260 DEVMETHOD(device_suspend
, igb_suspend
),
261 DEVMETHOD(device_resume
, igb_resume
),
265 static driver_t igb_driver
= {
268 sizeof(struct igb_softc
),
271 static devclass_t igb_devclass
;
273 DECLARE_DUMMY_MODULE(if_igb
);
274 MODULE_DEPEND(igb
, ig_hal
, 1, 1, 1);
275 DRIVER_MODULE(if_igb
, pci
, igb_driver
, igb_devclass
, NULL
, NULL
);
277 static int igb_rxd
= IGB_DEFAULT_RXD
;
278 static int igb_txd
= IGB_DEFAULT_TXD
;
279 static int igb_rxr
= 0;
280 static int igb_txr
= 0;
281 static int igb_msi_enable
= 1;
282 static int igb_msix_enable
= 1;
283 static int igb_eee_disabled
= 1; /* Energy Efficient Ethernet */
285 static char igb_flowctrl
[IFM_ETH_FC_STRLEN
] = IFM_ETH_FC_NONE
;
288 * DMA Coalescing, only for i350 - default to off,
289 * this feature is for power savings
291 static int igb_dma_coalesce
= 0;
293 TUNABLE_INT("hw.igb.rxd", &igb_rxd
);
294 TUNABLE_INT("hw.igb.txd", &igb_txd
);
295 TUNABLE_INT("hw.igb.rxr", &igb_rxr
);
296 TUNABLE_INT("hw.igb.txr", &igb_txr
);
297 TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable
);
298 TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable
);
299 TUNABLE_STR("hw.igb.flow_ctrl", igb_flowctrl
, sizeof(igb_flowctrl
));
302 TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled
);
303 TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce
);
306 igb_tx_intr(struct igb_tx_ring
*txr
, int hdr
)
310 if (!ifsq_is_empty(txr
->ifsq
))
311 ifsq_devstart(txr
->ifsq
);
315 igb_try_txgc(struct igb_tx_ring
*txr
, int16_t dec
)
318 if (txr
->tx_running
> 0) {
319 txr
->tx_running
-= dec
;
320 if (txr
->tx_running
<= 0 && txr
->tx_nmbuf
&&
321 txr
->tx_avail
< txr
->num_tx_desc
&&
322 txr
->tx_avail
+ txr
->intr_nsegs
> txr
->num_tx_desc
)
328 igb_txgc_timer(void *xtxr
)
330 struct igb_tx_ring
*txr
= xtxr
;
331 struct ifnet
*ifp
= &txr
->sc
->arpcom
.ac_if
;
333 if ((ifp
->if_flags
& (IFF_RUNNING
| IFF_UP
| IFF_NPOLLING
)) !=
334 (IFF_RUNNING
| IFF_UP
))
337 if (!lwkt_serialize_try(&txr
->tx_serialize
))
340 if ((ifp
->if_flags
& (IFF_RUNNING
| IFF_UP
| IFF_NPOLLING
)) !=
341 (IFF_RUNNING
| IFF_UP
)) {
342 lwkt_serialize_exit(&txr
->tx_serialize
);
345 igb_try_txgc(txr
, IGB_TX_RUNNING_DEC
);
347 lwkt_serialize_exit(&txr
->tx_serialize
);
349 callout_reset(&txr
->tx_gc_timer
, 1, igb_txgc_timer
, txr
);
353 igb_free_txbuf(struct igb_tx_ring
*txr
, struct igb_tx_buf
*txbuf
)
356 KKASSERT(txbuf
->m_head
!= NULL
);
357 KKASSERT(txr
->tx_nmbuf
> 0);
360 bus_dmamap_unload(txr
->tx_tag
, txbuf
->map
);
361 m_freem(txbuf
->m_head
);
362 txbuf
->m_head
= NULL
;
366 igb_rxcsum(uint32_t staterr
, struct mbuf
*mp
)
368 /* Ignore Checksum bit is set */
369 if (staterr
& E1000_RXD_STAT_IXSM
)
372 if ((staterr
& (E1000_RXD_STAT_IPCS
| E1000_RXDEXT_STATERR_IPE
)) ==
374 mp
->m_pkthdr
.csum_flags
|= CSUM_IP_CHECKED
| CSUM_IP_VALID
;
376 if (staterr
& (E1000_RXD_STAT_TCPCS
| E1000_RXD_STAT_UDPCS
)) {
377 if ((staterr
& E1000_RXDEXT_STATERR_TCPE
) == 0) {
378 mp
->m_pkthdr
.csum_flags
|= CSUM_DATA_VALID
|
379 CSUM_PSEUDO_HDR
| CSUM_FRAG_NOT_CHECKED
;
380 mp
->m_pkthdr
.csum_data
= htons(0xffff);
385 static __inline
struct pktinfo
*
386 igb_rssinfo(struct mbuf
*m
, struct pktinfo
*pi
,
387 uint32_t hash
, uint32_t hashtype
, uint32_t staterr
)
390 case E1000_RXDADV_RSSTYPE_IPV4_TCP
:
391 pi
->pi_netisr
= NETISR_IP
;
393 pi
->pi_l3proto
= IPPROTO_TCP
;
396 case E1000_RXDADV_RSSTYPE_IPV4
:
397 if (staterr
& E1000_RXD_STAT_IXSM
)
401 (E1000_RXD_STAT_TCPCS
| E1000_RXDEXT_STATERR_TCPE
)) ==
402 E1000_RXD_STAT_TCPCS
) {
403 pi
->pi_netisr
= NETISR_IP
;
405 pi
->pi_l3proto
= IPPROTO_UDP
;
413 m_sethash(m
, toeplitz_hash(hash
));
418 igb_get_ring_max(const struct igb_softc
*sc
)
421 switch (sc
->hw
.mac
.type
) {
423 return (IGB_MAX_RING_82575
);
426 return (IGB_MAX_RING_82576
);
429 return (IGB_MAX_RING_82580
);
432 return (IGB_MAX_RING_I350
);
435 return (IGB_MAX_RING_I354
);
438 return (IGB_MAX_RING_I210
);
441 return (IGB_MAX_RING_I211
);
444 return (IGB_MIN_RING
);
449 igb_get_rxring_cnt(const struct igb_softc
*sc
, int *ring_cnt
, int *ring_max
)
452 *ring_max
= igb_get_ring_max(sc
);
453 *ring_cnt
= device_getenv_int(sc
->dev
, "rxr", igb_rxr
);
457 igb_get_txring_cnt(const struct igb_softc
*sc
, int *ring_cnt
, int *ring_max
)
460 *ring_max
= igb_get_ring_max(sc
);
461 *ring_cnt
= device_getenv_int(sc
->dev
, "txr", igb_txr
);
465 igb_probe(device_t dev
)
467 const struct igb_device
*d
;
470 vid
= pci_get_vendor(dev
);
471 did
= pci_get_device(dev
);
473 for (d
= igb_devices
; d
->desc
!= NULL
; ++d
) {
474 if (vid
== d
->vid
&& did
== d
->did
) {
475 device_set_desc(dev
, d
->desc
);
483 igb_attach(device_t dev
)
485 struct igb_softc
*sc
= device_get_softc(dev
);
486 uint16_t eeprom_data
;
487 int error
= 0, ring_max
, ring_cnt
;
488 char flowctrl
[IFM_ETH_FC_STRLEN
];
492 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev
),
493 SYSCTL_CHILDREN(device_get_sysctl_tree(dev
)),
494 OID_AUTO
, "nvm", CTLTYPE_INT
|CTLFLAG_RW
, adapter
, 0,
495 igb_sysctl_nvm_info
, "I", "NVM Information");
498 ifmedia_init(&sc
->media
, IFM_IMASK
| IFM_ETH_FCMASK
,
499 igb_media_change
, igb_media_status
);
500 callout_init_mp(&sc
->timer
);
501 lwkt_serialize_init(&sc
->main_serialize
);
503 if_initname(&sc
->arpcom
.ac_if
, device_get_name(dev
),
504 device_get_unit(dev
));
505 sc
->dev
= sc
->osdep
.dev
= dev
;
507 /* Enable bus mastering */
508 pci_enable_busmaster(dev
);
511 * Determine hardware and mac type
513 sc
->hw
.vendor_id
= pci_get_vendor(dev
);
514 sc
->hw
.device_id
= pci_get_device(dev
);
515 sc
->hw
.revision_id
= pci_read_config(dev
, PCIR_REVID
, 1);
516 sc
->hw
.subsystem_vendor_id
= pci_read_config(dev
, PCIR_SUBVEND_0
, 2);
517 sc
->hw
.subsystem_device_id
= pci_read_config(dev
, PCIR_SUBDEV_0
, 2);
519 if (e1000_set_mac_type(&sc
->hw
))
522 /* Are we a VF device? */
523 if (sc
->hw
.mac
.type
== e1000_vfadapt
||
524 sc
->hw
.mac
.type
== e1000_vfadapt_i350
)
530 * Configure total supported RX/TX ring count
532 igb_get_rxring_cnt(sc
, &ring_cnt
, &ring_max
);
533 sc
->rx_rmap
= if_ringmap_alloc(dev
, ring_cnt
, ring_max
);
534 igb_get_txring_cnt(sc
, &ring_cnt
, &ring_max
);
535 sc
->tx_rmap
= if_ringmap_alloc(dev
, ring_cnt
, ring_max
);
536 if_ringmap_match(dev
, sc
->rx_rmap
, sc
->tx_rmap
);
538 sc
->rx_ring_cnt
= if_ringmap_count(sc
->rx_rmap
);
539 sc
->rx_ring_inuse
= sc
->rx_ring_cnt
;
540 sc
->tx_ring_cnt
= if_ringmap_count(sc
->tx_rmap
);
541 sc
->tx_ring_inuse
= sc
->tx_ring_cnt
;
543 /* Setup flow control. */
544 device_getenv_string(dev
, "flow_ctrl", flowctrl
, sizeof(flowctrl
),
546 sc
->ifm_flowctrl
= ifmedia_str2ethfc(flowctrl
);
551 sc
->mem_rid
= PCIR_BAR(0);
552 sc
->mem_res
= bus_alloc_resource_any(dev
, SYS_RES_MEMORY
, &sc
->mem_rid
,
554 if (sc
->mem_res
== NULL
) {
555 device_printf(dev
, "Unable to allocate bus resource: memory\n");
559 sc
->osdep
.mem_bus_space_tag
= rman_get_bustag(sc
->mem_res
);
560 sc
->osdep
.mem_bus_space_handle
= rman_get_bushandle(sc
->mem_res
);
562 sc
->hw
.hw_addr
= (uint8_t *)&sc
->osdep
.mem_bus_space_handle
;
564 /* Save PCI command register for Shared Code */
565 sc
->hw
.bus
.pci_cmd_word
= pci_read_config(dev
, PCIR_COMMAND
, 2);
566 sc
->hw
.back
= &sc
->osdep
;
568 /* Do Shared Code initialization */
569 if (e1000_setup_init_funcs(&sc
->hw
, TRUE
)) {
570 device_printf(dev
, "Setup of Shared code failed\n");
575 e1000_get_bus_info(&sc
->hw
);
577 sc
->hw
.mac
.autoneg
= DO_AUTO_NEG
;
578 sc
->hw
.phy
.autoneg_wait_to_complete
= FALSE
;
579 sc
->hw
.phy
.autoneg_advertised
= AUTONEG_ADV_DEFAULT
;
582 if (sc
->hw
.phy
.media_type
== e1000_media_type_copper
) {
583 sc
->hw
.phy
.mdix
= AUTO_ALL_MODES
;
584 sc
->hw
.phy
.disable_polarity_correction
= FALSE
;
585 sc
->hw
.phy
.ms_type
= IGB_MASTER_SLAVE
;
588 /* Set the frame limits assuming standard ethernet sized frames. */
589 sc
->max_frame_size
= ETHERMTU
+ ETHER_HDR_LEN
+ ETHER_CRC_LEN
;
591 /* Allocate RX/TX rings */
592 error
= igb_alloc_rings(sc
);
596 /* Allocate interrupt */
597 error
= igb_alloc_intr(sc
);
601 /* Setup serializes */
602 igb_setup_serialize(sc
);
604 /* Allocate the appropriate stats memory */
606 sc
->stats
= kmalloc(sizeof(struct e1000_vf_stats
), M_DEVBUF
,
608 igb_vf_init_stats(sc
);
610 sc
->stats
= kmalloc(sizeof(struct e1000_hw_stats
), M_DEVBUF
,
614 /* Allocate multicast array memory. */
615 sc
->mta
= kmalloc(ETHER_ADDR_LEN
* MAX_NUM_MULTICAST_ADDRESSES
,
618 /* Some adapter-specific advanced features */
619 if (sc
->hw
.mac
.type
>= e1000_i350
) {
621 igb_set_sysctl_value(adapter
, "dma_coalesce",
622 "configure dma coalesce",
623 &adapter
->dma_coalesce
, igb_dma_coalesce
);
624 igb_set_sysctl_value(adapter
, "eee_disabled",
625 "enable Energy Efficient Ethernet",
626 &adapter
->hw
.dev_spec
._82575
.eee_disable
,
629 sc
->dma_coalesce
= igb_dma_coalesce
;
630 sc
->hw
.dev_spec
._82575
.eee_disable
= igb_eee_disabled
;
632 if (sc
->hw
.phy
.media_type
== e1000_media_type_copper
) {
633 if (sc
->hw
.mac
.type
== e1000_i354
)
634 e1000_set_eee_i354(&sc
->hw
, TRUE
, TRUE
);
636 e1000_set_eee_i350(&sc
->hw
, TRUE
, TRUE
);
641 * Start from a known state, this is important in reading the nvm and
644 e1000_reset_hw(&sc
->hw
);
646 /* Make sure we have a good EEPROM before we read from it */
647 if (sc
->hw
.mac
.type
!= e1000_i210
&& sc
->hw
.mac
.type
!= e1000_i211
&&
648 e1000_validate_nvm_checksum(&sc
->hw
) < 0) {
650 * Some PCI-E parts fail the first check due to
651 * the link being in sleep state, call it again,
652 * if it fails a second time its a real issue.
654 if (e1000_validate_nvm_checksum(&sc
->hw
) < 0) {
656 "The EEPROM Checksum Is Not Valid\n");
662 /* Copy the permanent MAC address out of the EEPROM */
663 if (e1000_read_mac_addr(&sc
->hw
) < 0) {
664 device_printf(dev
, "EEPROM read error while reading MAC"
669 if (!igb_is_valid_ether_addr(sc
->hw
.mac
.addr
)) {
670 device_printf(dev
, "Invalid MAC address\n");
675 /* Setup OS specific network interface */
678 /* Add sysctl tree, must after igb_setup_ifp() */
681 /* Now get a good starting state */
682 igb_reset(sc
, FALSE
);
684 /* Initialize statistics */
685 igb_update_stats_counters(sc
);
687 sc
->hw
.mac
.get_link_status
= 1;
688 igb_update_link_status(sc
);
690 /* Indicate SOL/IDER usage */
691 if (e1000_check_reset_block(&sc
->hw
)) {
693 "PHY reset is blocked due to SOL/IDER session.\n");
696 /* Determine if we have to control management hardware */
697 if (e1000_enable_mng_pass_thru(&sc
->hw
))
698 sc
->flags
|= IGB_FLAG_HAS_MGMT
;
703 /* APME bit in EEPROM is mapped to WUC.APME */
704 eeprom_data
= E1000_READ_REG(&sc
->hw
, E1000_WUC
) & E1000_WUC_APME
;
706 /* XXX E1000_WUFC_MC always be cleared from E1000_WUC. */
707 sc
->wol
= E1000_WUFC_MAG
| E1000_WUFC_MC
;
708 device_printf(dev
, "has WOL\n");
712 /* Register for VLAN events */
713 adapter
->vlan_attach
= EVENTHANDLER_REGISTER(vlan_config
,
714 igb_register_vlan
, adapter
, EVENTHANDLER_PRI_FIRST
);
715 adapter
->vlan_detach
= EVENTHANDLER_REGISTER(vlan_unconfig
,
716 igb_unregister_vlan
, adapter
, EVENTHANDLER_PRI_FIRST
);
720 igb_add_hw_stats(adapter
);
724 * Disable interrupt to prevent spurious interrupts (line based
725 * interrupt, MSI or even MSI-X), which had been observed on
726 * several types of LOMs, from being handled.
728 igb_disable_intr(sc
);
730 error
= igb_setup_intr(sc
);
732 ether_ifdetach(&sc
->arpcom
.ac_if
);
743 igb_detach(device_t dev
)
745 struct igb_softc
*sc
= device_get_softc(dev
);
747 if (device_is_attached(dev
)) {
748 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
750 ifnet_serialize_all(ifp
);
754 e1000_phy_hw_reset(&sc
->hw
);
756 /* Give control back to firmware */
758 igb_rel_hw_control(sc
);
761 igb_teardown_intr(sc
, sc
->intr_cnt
);
763 ifnet_deserialize_all(ifp
);
766 } else if (sc
->mem_res
!= NULL
) {
767 igb_rel_hw_control(sc
);
770 ifmedia_removeall(&sc
->media
);
771 bus_generic_detach(dev
);
775 if (sc
->msix_mem_res
!= NULL
) {
776 bus_release_resource(dev
, SYS_RES_MEMORY
, sc
->msix_mem_rid
,
779 if (sc
->mem_res
!= NULL
) {
780 bus_release_resource(dev
, SYS_RES_MEMORY
, sc
->mem_rid
,
787 kfree(sc
->mta
, M_DEVBUF
);
788 if (sc
->stats
!= NULL
)
789 kfree(sc
->stats
, M_DEVBUF
);
790 if (sc
->serializes
!= NULL
)
791 kfree(sc
->serializes
, M_DEVBUF
);
792 if (sc
->rx_rmap
!= NULL
)
793 if_ringmap_free(sc
->rx_rmap
);
794 if (sc
->rx_rmap_intr
!= NULL
)
795 if_ringmap_free(sc
->rx_rmap_intr
);
796 if (sc
->tx_rmap
!= NULL
)
797 if_ringmap_free(sc
->tx_rmap
);
798 if (sc
->tx_rmap_intr
!= NULL
)
799 if_ringmap_free(sc
->tx_rmap_intr
);
805 igb_shutdown(device_t dev
)
807 return igb_suspend(dev
);
811 igb_suspend(device_t dev
)
813 struct igb_softc
*sc
= device_get_softc(dev
);
814 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
816 ifnet_serialize_all(ifp
);
821 igb_rel_hw_control(sc
);
824 ifnet_deserialize_all(ifp
);
826 return bus_generic_suspend(dev
);
830 igb_resume(device_t dev
)
832 struct igb_softc
*sc
= device_get_softc(dev
);
833 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
836 ifnet_serialize_all(ifp
);
841 for (i
= 0; i
< sc
->tx_ring_inuse
; ++i
)
842 ifsq_devstart_sched(sc
->tx_rings
[i
].ifsq
);
844 ifnet_deserialize_all(ifp
);
846 return bus_generic_resume(dev
);
850 igb_ioctl(struct ifnet
*ifp
, u_long command
, caddr_t data
, struct ucred
*cr
)
852 struct igb_softc
*sc
= ifp
->if_softc
;
853 struct ifreq
*ifr
= (struct ifreq
*)data
;
854 int max_frame_size
, mask
, reinit
;
857 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
861 max_frame_size
= 9234;
862 if (ifr
->ifr_mtu
> max_frame_size
- ETHER_HDR_LEN
-
868 ifp
->if_mtu
= ifr
->ifr_mtu
;
869 sc
->max_frame_size
= ifp
->if_mtu
+ ETHER_HDR_LEN
+
872 if (ifp
->if_flags
& IFF_RUNNING
)
877 if (ifp
->if_flags
& IFF_UP
) {
878 if (ifp
->if_flags
& IFF_RUNNING
) {
879 if ((ifp
->if_flags
^ sc
->if_flags
) &
880 (IFF_PROMISC
| IFF_ALLMULTI
)) {
881 igb_disable_promisc(sc
);
887 } else if (ifp
->if_flags
& IFF_RUNNING
) {
890 sc
->if_flags
= ifp
->if_flags
;
895 if (ifp
->if_flags
& IFF_RUNNING
) {
896 igb_disable_intr(sc
);
899 if (!(ifp
->if_flags
& IFF_NPOLLING
))
906 /* Check SOL/IDER usage */
907 if (e1000_check_reset_block(&sc
->hw
)) {
908 if_printf(ifp
, "Media change is "
909 "blocked due to SOL/IDER session.\n");
915 error
= ifmedia_ioctl(ifp
, ifr
, &sc
->media
, command
);
920 mask
= ifr
->ifr_reqcap
^ ifp
->if_capenable
;
921 if (mask
& IFCAP_RXCSUM
) {
922 ifp
->if_capenable
^= IFCAP_RXCSUM
;
925 if (mask
& IFCAP_VLAN_HWTAGGING
) {
926 ifp
->if_capenable
^= IFCAP_VLAN_HWTAGGING
;
929 if (mask
& IFCAP_TXCSUM
) {
930 ifp
->if_capenable
^= IFCAP_TXCSUM
;
931 if (ifp
->if_capenable
& IFCAP_TXCSUM
)
932 ifp
->if_hwassist
|= IGB_CSUM_FEATURES
;
934 ifp
->if_hwassist
&= ~IGB_CSUM_FEATURES
;
936 if (mask
& IFCAP_TSO
) {
937 ifp
->if_capenable
^= IFCAP_TSO
;
938 if (ifp
->if_capenable
& IFCAP_TSO
)
939 ifp
->if_hwassist
|= CSUM_TSO
;
941 ifp
->if_hwassist
&= ~CSUM_TSO
;
943 if (mask
& IFCAP_RSS
)
944 ifp
->if_capenable
^= IFCAP_RSS
;
945 if (reinit
&& (ifp
->if_flags
& IFF_RUNNING
))
950 error
= ether_ioctl(ifp
, command
, data
);
959 struct igb_softc
*sc
= xsc
;
960 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
964 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
968 /* Get the latest mac address, User can use a LAA */
969 bcopy(IF_LLADDR(ifp
), sc
->hw
.mac
.addr
, ETHER_ADDR_LEN
);
971 /* Put the address into the Receive Address Array */
972 e1000_rar_set(&sc
->hw
, sc
->hw
.mac
.addr
, 0);
974 igb_reset(sc
, FALSE
);
975 igb_update_link_status(sc
);
977 E1000_WRITE_REG(&sc
->hw
, E1000_VET
, ETHERTYPE_VLAN
);
979 /* Clear bad data from Rx FIFOs */
980 e1000_rx_fifo_flush_82575(&sc
->hw
);
982 /* Configure for OS presence */
987 if (ifp
->if_flags
& IFF_NPOLLING
)
991 /* Configured used RX/TX rings */
992 igb_set_ring_inuse(sc
, polling
);
993 ifq_set_subq_divisor(&ifp
->if_snd
, sc
->tx_ring_inuse
);
995 /* Initialize interrupt */
998 /* Prepare transmit descriptors and buffers */
999 for (i
= 0; i
< sc
->tx_ring_inuse
; ++i
)
1000 igb_init_tx_ring(&sc
->tx_rings
[i
]);
1001 igb_init_tx_unit(sc
);
1003 /* Setup Multicast table */
1008 * Figure out the desired mbuf pool
1009 * for doing jumbo/packetsplit
1011 if (adapter
->max_frame_size
<= 2048)
1012 adapter
->rx_mbuf_sz
= MCLBYTES
;
1013 else if (adapter
->max_frame_size
<= 4096)
1014 adapter
->rx_mbuf_sz
= MJUMPAGESIZE
;
1016 adapter
->rx_mbuf_sz
= MJUM9BYTES
;
1019 /* Prepare receive descriptors and buffers */
1020 for (i
= 0; i
< sc
->rx_ring_inuse
; ++i
) {
1023 error
= igb_init_rx_ring(&sc
->rx_rings
[i
]);
1025 if_printf(ifp
, "Could not setup receive structures\n");
1030 igb_init_rx_unit(sc
, polling
);
1032 /* Enable VLAN support */
1033 if (ifp
->if_capenable
& IFCAP_VLAN_HWTAGGING
)
1036 /* Don't lose promiscuous settings */
1037 igb_set_promisc(sc
);
1039 /* Clear counters */
1040 e1000_clear_hw_cntrs_base_generic(&sc
->hw
);
1042 /* This clears any pending interrupts */
1043 E1000_READ_REG(&sc
->hw
, E1000_ICR
);
1046 * Only enable interrupts if we are not polling, make sure
1047 * they are off otherwise.
1050 igb_disable_intr(sc
);
1052 igb_enable_intr(sc
);
1053 E1000_WRITE_REG(&sc
->hw
, E1000_ICS
, E1000_ICS_LSC
);
1056 /* Set Energy Efficient Ethernet */
1057 if (sc
->hw
.phy
.media_type
== e1000_media_type_copper
) {
1058 if (sc
->hw
.mac
.type
== e1000_i354
)
1059 e1000_set_eee_i354(&sc
->hw
, TRUE
, TRUE
);
1061 e1000_set_eee_i350(&sc
->hw
, TRUE
, TRUE
);
1064 ifp
->if_flags
|= IFF_RUNNING
;
1065 for (i
= 0; i
< sc
->tx_ring_inuse
; ++i
) {
1066 struct igb_tx_ring
*txr
= &sc
->tx_rings
[i
];
1068 ifsq_clr_oactive(txr
->ifsq
);
1069 ifsq_watchdog_start(&txr
->tx_watchdog
);
1072 callout_reset_bycpu(&txr
->tx_gc_timer
, 1,
1073 igb_txgc_timer
, txr
, txr
->tx_intr_cpuid
);
1077 igb_set_timer_cpuid(sc
, polling
);
1078 callout_reset_bycpu(&sc
->timer
, hz
, igb_timer
, sc
, sc
->timer_cpuid
);
1082 igb_media_status(struct ifnet
*ifp
, struct ifmediareq
*ifmr
)
1084 struct igb_softc
*sc
= ifp
->if_softc
;
1086 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
1088 if ((ifp
->if_flags
& IFF_RUNNING
) == 0)
1089 sc
->hw
.mac
.get_link_status
= 1;
1090 igb_update_link_status(sc
);
1092 ifmr
->ifm_status
= IFM_AVALID
;
1093 ifmr
->ifm_active
= IFM_ETHER
;
1095 if (!sc
->link_active
) {
1096 if (sc
->hw
.mac
.autoneg
)
1097 ifmr
->ifm_active
|= IFM_NONE
;
1099 ifmr
->ifm_active
|= sc
->media
.ifm_media
;
1103 ifmr
->ifm_status
|= IFM_ACTIVE
;
1104 if (sc
->ifm_flowctrl
& IFM_ETH_FORCEPAUSE
)
1105 ifmr
->ifm_active
|= sc
->ifm_flowctrl
;
1107 switch (sc
->link_speed
) {
1109 ifmr
->ifm_active
|= IFM_10_T
;
1114 * Support for 100Mb SFP - these are Fiber
1115 * but the media type appears as serdes
1117 if (sc
->hw
.phy
.media_type
== e1000_media_type_fiber
||
1118 sc
->hw
.phy
.media_type
== e1000_media_type_internal_serdes
)
1119 ifmr
->ifm_active
|= IFM_100_FX
;
1121 ifmr
->ifm_active
|= IFM_100_TX
;
1125 if (sc
->hw
.phy
.media_type
== e1000_media_type_fiber
||
1126 sc
->hw
.phy
.media_type
== e1000_media_type_internal_serdes
)
1127 ifmr
->ifm_active
|= IFM_1000_SX
;
1129 ifmr
->ifm_active
|= IFM_1000_T
;
1133 ifmr
->ifm_active
|= IFM_2500_SX
;
1137 if (sc
->link_duplex
== FULL_DUPLEX
)
1138 ifmr
->ifm_active
|= IFM_FDX
;
1140 ifmr
->ifm_active
|= IFM_HDX
;
1142 if (sc
->link_duplex
== FULL_DUPLEX
)
1143 ifmr
->ifm_active
|= e1000_fc2ifmedia(sc
->hw
.fc
.current_mode
);
1147 igb_media_change(struct ifnet
*ifp
)
1149 struct igb_softc
*sc
= ifp
->if_softc
;
1150 struct ifmedia
*ifm
= &sc
->media
;
1152 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
1154 if (IFM_TYPE(ifm
->ifm_media
) != IFM_ETHER
)
1157 switch (IFM_SUBTYPE(ifm
->ifm_media
)) {
1159 sc
->hw
.mac
.autoneg
= DO_AUTO_NEG
;
1160 sc
->hw
.phy
.autoneg_advertised
= AUTONEG_ADV_DEFAULT
;
1165 sc
->hw
.mac
.autoneg
= DO_AUTO_NEG
;
1166 sc
->hw
.phy
.autoneg_advertised
= ADVERTISE_1000_FULL
;
1170 if (IFM_OPTIONS(ifm
->ifm_media
) & IFM_FDX
) {
1171 sc
->hw
.mac
.forced_speed_duplex
= ADVERTISE_100_FULL
;
1173 if (IFM_OPTIONS(ifm
->ifm_media
) &
1174 (IFM_ETH_RXPAUSE
| IFM_ETH_TXPAUSE
)) {
1176 if_printf(ifp
, "Flow control is not "
1177 "allowed for half-duplex\n");
1181 sc
->hw
.mac
.forced_speed_duplex
= ADVERTISE_100_HALF
;
1183 sc
->hw
.mac
.autoneg
= FALSE
;
1184 sc
->hw
.phy
.autoneg_advertised
= 0;
1188 if (IFM_OPTIONS(ifm
->ifm_media
) & IFM_FDX
) {
1189 sc
->hw
.mac
.forced_speed_duplex
= ADVERTISE_10_FULL
;
1191 if (IFM_OPTIONS(ifm
->ifm_media
) &
1192 (IFM_ETH_RXPAUSE
| IFM_ETH_TXPAUSE
)) {
1194 if_printf(ifp
, "Flow control is not "
1195 "allowed for half-duplex\n");
1199 sc
->hw
.mac
.forced_speed_duplex
= ADVERTISE_10_HALF
;
1201 sc
->hw
.mac
.autoneg
= FALSE
;
1202 sc
->hw
.phy
.autoneg_advertised
= 0;
1207 if_printf(ifp
, "Unsupported media type %d\n",
1208 IFM_SUBTYPE(ifm
->ifm_media
));
1212 sc
->ifm_flowctrl
= ifm
->ifm_media
& IFM_ETH_FCMASK
;
1214 if (ifp
->if_flags
& IFF_RUNNING
)
1221 igb_set_promisc(struct igb_softc
*sc
)
1223 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1224 struct e1000_hw
*hw
= &sc
->hw
;
1228 e1000_promisc_set_vf(hw
, e1000_promisc_enabled
);
1232 reg
= E1000_READ_REG(hw
, E1000_RCTL
);
1233 if (ifp
->if_flags
& IFF_PROMISC
) {
1234 reg
|= (E1000_RCTL_UPE
| E1000_RCTL_MPE
);
1235 E1000_WRITE_REG(hw
, E1000_RCTL
, reg
);
1236 } else if (ifp
->if_flags
& IFF_ALLMULTI
) {
1237 reg
|= E1000_RCTL_MPE
;
1238 reg
&= ~E1000_RCTL_UPE
;
1239 E1000_WRITE_REG(hw
, E1000_RCTL
, reg
);
1244 igb_disable_promisc(struct igb_softc
*sc
)
1246 struct e1000_hw
*hw
= &sc
->hw
;
1247 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1252 e1000_promisc_set_vf(hw
, e1000_promisc_disabled
);
1255 reg
= E1000_READ_REG(hw
, E1000_RCTL
);
1256 reg
&= ~E1000_RCTL_UPE
;
1257 if (ifp
->if_flags
& IFF_ALLMULTI
) {
1258 mcnt
= MAX_NUM_MULTICAST_ADDRESSES
;
1260 struct ifmultiaddr
*ifma
;
1261 TAILQ_FOREACH(ifma
, &ifp
->if_multiaddrs
, ifma_link
) {
1262 if (ifma
->ifma_addr
->sa_family
!= AF_LINK
)
1264 if (mcnt
== MAX_NUM_MULTICAST_ADDRESSES
)
1269 /* Don't disable if in MAX groups */
1270 if (mcnt
< MAX_NUM_MULTICAST_ADDRESSES
)
1271 reg
&= ~E1000_RCTL_MPE
;
1272 E1000_WRITE_REG(hw
, E1000_RCTL
, reg
);
1276 igb_set_multi(struct igb_softc
*sc
)
1278 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1279 struct ifmultiaddr
*ifma
;
1280 uint32_t reg_rctl
= 0;
1285 bzero(mta
, ETH_ADDR_LEN
* MAX_NUM_MULTICAST_ADDRESSES
);
1287 TAILQ_FOREACH(ifma
, &ifp
->if_multiaddrs
, ifma_link
) {
1288 if (ifma
->ifma_addr
->sa_family
!= AF_LINK
)
1291 if (mcnt
== MAX_NUM_MULTICAST_ADDRESSES
)
1294 bcopy(LLADDR((struct sockaddr_dl
*)ifma
->ifma_addr
),
1295 &mta
[mcnt
* ETH_ADDR_LEN
], ETH_ADDR_LEN
);
1299 if (mcnt
>= MAX_NUM_MULTICAST_ADDRESSES
) {
1300 reg_rctl
= E1000_READ_REG(&sc
->hw
, E1000_RCTL
);
1301 reg_rctl
|= E1000_RCTL_MPE
;
1302 E1000_WRITE_REG(&sc
->hw
, E1000_RCTL
, reg_rctl
);
1304 e1000_update_mc_addr_list(&sc
->hw
, mta
, mcnt
);
1309 igb_timer(void *xsc
)
1311 struct igb_softc
*sc
= xsc
;
1313 lwkt_serialize_enter(&sc
->main_serialize
);
1315 igb_update_link_status(sc
);
1316 igb_update_stats_counters(sc
);
1318 callout_reset_bycpu(&sc
->timer
, hz
, igb_timer
, sc
, sc
->timer_cpuid
);
1320 lwkt_serialize_exit(&sc
->main_serialize
);
1324 igb_update_link_status(struct igb_softc
*sc
)
1326 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1327 struct e1000_hw
*hw
= &sc
->hw
;
1328 uint32_t link_check
, thstat
, ctrl
;
1330 link_check
= thstat
= ctrl
= 0;
1332 /* Get the cached link value or read for real */
1333 switch (hw
->phy
.media_type
) {
1334 case e1000_media_type_copper
:
1335 if (hw
->mac
.get_link_status
) {
1336 /* Do the work to read phy */
1337 e1000_check_for_link(hw
);
1338 link_check
= !hw
->mac
.get_link_status
;
1344 case e1000_media_type_fiber
:
1345 e1000_check_for_link(hw
);
1346 link_check
= E1000_READ_REG(hw
, E1000_STATUS
) & E1000_STATUS_LU
;
1349 case e1000_media_type_internal_serdes
:
1350 e1000_check_for_link(hw
);
1351 link_check
= hw
->mac
.serdes_has_link
;
1354 /* VF device is type_unknown */
1355 case e1000_media_type_unknown
:
1356 e1000_check_for_link(hw
);
1357 link_check
= !hw
->mac
.get_link_status
;
1363 /* Check for thermal downshift or shutdown */
1364 if (hw
->mac
.type
== e1000_i350
) {
1365 thstat
= E1000_READ_REG(hw
, E1000_THSTAT
);
1366 ctrl
= E1000_READ_REG(hw
, E1000_CTRL_EXT
);
1369 /* Now we check if a transition has happened */
1370 if (link_check
&& sc
->link_active
== 0) {
1371 e1000_get_speed_and_duplex(hw
,
1372 &sc
->link_speed
, &sc
->link_duplex
);
1374 char flowctrl
[IFM_ETH_FC_STRLEN
];
1376 /* Get the flow control for display */
1377 e1000_fc2str(hw
->fc
.current_mode
, flowctrl
,
1380 if_printf(ifp
, "Link is up %d Mbps %s, "
1381 "Flow control: %s\n",
1383 sc
->link_duplex
== FULL_DUPLEX
?
1384 "Full Duplex" : "Half Duplex",
1387 if (sc
->ifm_flowctrl
& IFM_ETH_FORCEPAUSE
)
1388 e1000_force_flowctrl(hw
, sc
->ifm_flowctrl
);
1389 sc
->link_active
= 1;
1391 ifp
->if_baudrate
= sc
->link_speed
* 1000000;
1392 if ((ctrl
& E1000_CTRL_EXT_LINK_MODE_GMII
) &&
1393 (thstat
& E1000_THSTAT_LINK_THROTTLE
))
1394 if_printf(ifp
, "Link: thermal downshift\n");
1395 /* Delay Link Up for Phy update */
1396 if ((hw
->mac
.type
== e1000_i210
||
1397 hw
->mac
.type
== e1000_i211
) &&
1398 hw
->phy
.id
== I210_I_PHY_ID
)
1399 msec_delay(IGB_I210_LINK_DELAY
);
1401 * Reset if the media type changed.
1402 * Support AutoMediaDetect for Marvell M88 PHY in i354.
1404 if (hw
->dev_spec
._82575
.media_changed
) {
1405 hw
->dev_spec
._82575
.media_changed
= FALSE
;
1406 igb_reset(sc
, TRUE
);
1408 /* This can sleep */
1409 ifp
->if_link_state
= LINK_STATE_UP
;
1410 if_link_state_change(ifp
);
1411 } else if (!link_check
&& sc
->link_active
== 1) {
1412 ifp
->if_baudrate
= sc
->link_speed
= 0;
1413 sc
->link_duplex
= 0;
1415 if_printf(ifp
, "Link is Down\n");
1416 if ((ctrl
& E1000_CTRL_EXT_LINK_MODE_GMII
) &&
1417 (thstat
& E1000_THSTAT_PWR_DOWN
))
1418 if_printf(ifp
, "Link: thermal shutdown\n");
1419 sc
->link_active
= 0;
1420 /* This can sleep */
1421 ifp
->if_link_state
= LINK_STATE_DOWN
;
1422 if_link_state_change(ifp
);
1427 igb_stop(struct igb_softc
*sc
)
1429 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1432 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
1434 igb_disable_intr(sc
);
1436 callout_stop(&sc
->timer
);
1438 ifp
->if_flags
&= ~IFF_RUNNING
;
1439 for (i
= 0; i
< sc
->tx_ring_cnt
; ++i
) {
1440 struct igb_tx_ring
*txr
= &sc
->tx_rings
[i
];
1442 ifsq_clr_oactive(txr
->ifsq
);
1443 ifsq_watchdog_stop(&txr
->tx_watchdog
);
1444 txr
->tx_flags
&= ~IGB_TXFLAG_ENABLED
;
1446 txr
->tx_running
= 0;
1447 callout_stop(&txr
->tx_gc_timer
);
1450 e1000_reset_hw(&sc
->hw
);
1451 E1000_WRITE_REG(&sc
->hw
, E1000_WUC
, 0);
1453 e1000_led_off(&sc
->hw
);
1454 e1000_cleanup_led(&sc
->hw
);
1456 for (i
= 0; i
< sc
->tx_ring_cnt
; ++i
)
1457 igb_free_tx_ring(&sc
->tx_rings
[i
]);
1458 for (i
= 0; i
< sc
->rx_ring_cnt
; ++i
)
1459 igb_free_rx_ring(&sc
->rx_rings
[i
]);
1463 igb_reset(struct igb_softc
*sc
, boolean_t media_reset
)
1465 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1466 struct e1000_hw
*hw
= &sc
->hw
;
1467 struct e1000_fc_info
*fc
= &hw
->fc
;
1471 /* Let the firmware know the OS is in control */
1472 igb_get_hw_control(sc
);
1475 * Packet Buffer Allocation (PBA)
1476 * Writing PBA sets the receive portion of the buffer
1477 * the remainder is used for the transmit buffer.
1479 switch (hw
->mac
.type
) {
1481 pba
= E1000_PBA_32K
;
1486 pba
= E1000_READ_REG(hw
, E1000_RXPBS
);
1487 pba
&= E1000_RXPBS_SIZE_MASK_82576
;
1493 case e1000_vfadapt_i350
:
1494 pba
= E1000_READ_REG(hw
, E1000_RXPBS
);
1495 pba
= e1000_rxpbs_adjust_82580(pba
);
1500 pba
= E1000_PBA_34K
;
1507 /* Special needs in case of Jumbo frames */
1508 if (hw
->mac
.type
== e1000_82575
&& ifp
->if_mtu
> ETHERMTU
) {
1509 uint32_t tx_space
, min_tx
, min_rx
;
1511 pba
= E1000_READ_REG(hw
, E1000_PBA
);
1512 tx_space
= pba
>> 16;
1515 min_tx
= (sc
->max_frame_size
+
1516 sizeof(struct e1000_tx_desc
) - ETHER_CRC_LEN
) * 2;
1517 min_tx
= roundup2(min_tx
, 1024);
1519 min_rx
= sc
->max_frame_size
;
1520 min_rx
= roundup2(min_rx
, 1024);
1522 if (tx_space
< min_tx
&& (min_tx
- tx_space
) < pba
) {
1523 pba
= pba
- (min_tx
- tx_space
);
1525 * if short on rx space, rx wins
1526 * and must trump tx adjustment
1531 E1000_WRITE_REG(hw
, E1000_PBA
, pba
);
1535 * These parameters control the automatic generation (Tx) and
1536 * response (Rx) to Ethernet PAUSE frames.
1537 * - High water mark should allow for at least two frames to be
1538 * received after sending an XOFF.
1539 * - Low water mark works best when it is very near the high water mark.
1540 * This allows the receiver to restart by sending XON when it has
1543 hwm
= min(((pba
<< 10) * 9 / 10),
1544 ((pba
<< 10) - 2 * sc
->max_frame_size
));
1546 if (hw
->mac
.type
< e1000_82576
) {
1547 fc
->high_water
= hwm
& 0xFFF8; /* 8-byte granularity */
1548 fc
->low_water
= fc
->high_water
- 8;
1550 fc
->high_water
= hwm
& 0xFFF0; /* 16-byte granularity */
1551 fc
->low_water
= fc
->high_water
- 16;
1553 fc
->pause_time
= IGB_FC_PAUSE_TIME
;
1554 fc
->send_xon
= TRUE
;
1555 fc
->requested_mode
= e1000_ifmedia2fc(sc
->ifm_flowctrl
);
1557 /* Issue a global reset */
1559 E1000_WRITE_REG(hw
, E1000_WUC
, 0);
1561 /* Reset for AutoMediaDetect */
1563 e1000_setup_init_funcs(hw
, TRUE
);
1564 e1000_get_bus_info(hw
);
1567 if (e1000_init_hw(hw
) < 0)
1568 if_printf(ifp
, "Hardware Initialization Failed\n");
1570 /* Setup DMA Coalescing */
1571 igb_init_dmac(sc
, pba
);
1573 E1000_WRITE_REG(&sc
->hw
, E1000_VET
, ETHERTYPE_VLAN
);
1574 e1000_get_phy_info(hw
);
1575 e1000_check_for_link(hw
);
1579 igb_setup_ifp(struct igb_softc
*sc
)
1581 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1585 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
1586 ifp
->if_init
= igb_init
;
1587 ifp
->if_ioctl
= igb_ioctl
;
1588 ifp
->if_start
= igb_start
;
1589 ifp
->if_serialize
= igb_serialize
;
1590 ifp
->if_deserialize
= igb_deserialize
;
1591 ifp
->if_tryserialize
= igb_tryserialize
;
1593 ifp
->if_serialize_assert
= igb_serialize_assert
;
1595 #ifdef IFPOLL_ENABLE
1596 ifp
->if_npoll
= igb_npoll
;
1599 ifp
->if_nmbclusters
= sc
->rx_ring_cnt
* sc
->rx_rings
[0].num_rx_desc
;
1601 ifq_set_maxlen(&ifp
->if_snd
, sc
->tx_rings
[0].num_tx_desc
- 1);
1602 ifq_set_ready(&ifp
->if_snd
);
1603 ifq_set_subq_cnt(&ifp
->if_snd
, sc
->tx_ring_cnt
);
1605 ifp
->if_mapsubq
= ifq_mapsubq_modulo
;
1606 ifq_set_subq_divisor(&ifp
->if_snd
, 1);
1608 ether_ifattach(ifp
, sc
->hw
.mac
.addr
, NULL
);
1610 ifp
->if_capabilities
=
1611 IFCAP_HWCSUM
| IFCAP_VLAN_HWTAGGING
| IFCAP_VLAN_MTU
| IFCAP_TSO
;
1612 if (IGB_ENABLE_HWRSS(sc
))
1613 ifp
->if_capabilities
|= IFCAP_RSS
;
1614 ifp
->if_capenable
= ifp
->if_capabilities
;
1615 ifp
->if_hwassist
= IGB_CSUM_FEATURES
| CSUM_TSO
;
1618 * Tell the upper layer(s) we support long frames
1620 ifp
->if_data
.ifi_hdrlen
= sizeof(struct ether_vlan_header
);
1622 /* Setup TX rings and subqueues */
1623 for (i
= 0; i
< sc
->tx_ring_cnt
; ++i
) {
1624 struct ifaltq_subque
*ifsq
= ifq_get_subq(&ifp
->if_snd
, i
);
1625 struct igb_tx_ring
*txr
= &sc
->tx_rings
[i
];
1627 ifsq_set_cpuid(ifsq
, txr
->tx_intr_cpuid
);
1628 ifsq_set_priv(ifsq
, txr
);
1629 ifsq_set_hw_serialize(ifsq
, &txr
->tx_serialize
);
1632 ifsq_watchdog_init(&txr
->tx_watchdog
, ifsq
, igb_watchdog
);
1636 * Specify the media types supported by this adapter and register
1637 * callbacks to update media and link information
1639 if (sc
->hw
.phy
.media_type
== e1000_media_type_fiber
||
1640 sc
->hw
.phy
.media_type
== e1000_media_type_internal_serdes
) {
1641 ifmedia_add(&sc
->media
, IFM_ETHER
| IFM_1000_SX
| IFM_FDX
,
1644 ifmedia_add(&sc
->media
, IFM_ETHER
| IFM_10_T
, 0, NULL
);
1645 ifmedia_add(&sc
->media
, IFM_ETHER
| IFM_10_T
| IFM_FDX
,
1647 ifmedia_add(&sc
->media
, IFM_ETHER
| IFM_100_TX
, 0, NULL
);
1648 ifmedia_add(&sc
->media
, IFM_ETHER
| IFM_100_TX
| IFM_FDX
,
1650 if (sc
->hw
.phy
.type
!= e1000_phy_ife
) {
1651 ifmedia_add(&sc
->media
,
1652 IFM_ETHER
| IFM_1000_T
| IFM_FDX
, 0, NULL
);
1655 ifmedia_add(&sc
->media
, IFM_ETHER
| IFM_AUTO
, 0, NULL
);
1656 ifmedia_set(&sc
->media
, IFM_ETHER
| IFM_AUTO
| sc
->ifm_flowctrl
);
1660 igb_add_sysctl(struct igb_softc
*sc
)
1662 struct sysctl_ctx_list
*ctx
;
1663 struct sysctl_oid
*tree
;
1667 ctx
= device_get_sysctl_ctx(sc
->dev
);
1668 tree
= device_get_sysctl_tree(sc
->dev
);
1669 SYSCTL_ADD_INT(ctx
, SYSCTL_CHILDREN(tree
),
1670 OID_AUTO
, "rxr", CTLFLAG_RD
, &sc
->rx_ring_cnt
, 0, "# of RX rings");
1671 SYSCTL_ADD_INT(ctx
, SYSCTL_CHILDREN(tree
),
1672 OID_AUTO
, "rxr_inuse", CTLFLAG_RD
, &sc
->rx_ring_inuse
, 0,
1673 "# of RX rings used");
1674 SYSCTL_ADD_INT(ctx
, SYSCTL_CHILDREN(tree
),
1675 OID_AUTO
, "txr", CTLFLAG_RD
, &sc
->tx_ring_cnt
, 0, "# of TX rings");
1676 SYSCTL_ADD_INT(ctx
, SYSCTL_CHILDREN(tree
),
1677 OID_AUTO
, "txr_inuse", CTLFLAG_RD
, &sc
->tx_ring_inuse
, 0,
1678 "# of TX rings used");
1679 SYSCTL_ADD_INT(ctx
, SYSCTL_CHILDREN(tree
),
1680 OID_AUTO
, "rxd", CTLFLAG_RD
, &sc
->rx_rings
[0].num_rx_desc
, 0,
1682 SYSCTL_ADD_INT(ctx
, SYSCTL_CHILDREN(tree
),
1683 OID_AUTO
, "txd", CTLFLAG_RD
, &sc
->tx_rings
[0].num_tx_desc
, 0,
1686 #define IGB_ADD_INTR_RATE_SYSCTL(sc, use, name) \
1688 igb_add_intr_rate_sysctl(sc, IGB_INTR_USE_##use, #name "_intr_rate", \
1689 #use " interrupt rate"); \
1692 IGB_ADD_INTR_RATE_SYSCTL(sc
, RXTX
, rxtx
);
1693 IGB_ADD_INTR_RATE_SYSCTL(sc
, RX
, rx
);
1694 IGB_ADD_INTR_RATE_SYSCTL(sc
, TX
, tx
);
1695 IGB_ADD_INTR_RATE_SYSCTL(sc
, STATUS
, sts
);
1697 #undef IGB_ADD_INTR_RATE_SYSCTL
1699 SYSCTL_ADD_PROC(ctx
, SYSCTL_CHILDREN(tree
),
1700 OID_AUTO
, "tx_intr_nsegs", CTLTYPE_INT
| CTLFLAG_RW
,
1701 sc
, 0, igb_sysctl_tx_intr_nsegs
, "I",
1702 "# of segments per TX interrupt");
1704 SYSCTL_ADD_PROC(ctx
, SYSCTL_CHILDREN(tree
),
1705 OID_AUTO
, "tx_wreg_nsegs", CTLTYPE_INT
| CTLFLAG_RW
,
1706 sc
, 0, igb_sysctl_tx_wreg_nsegs
, "I",
1707 "# of segments sent before write to hardware register");
1709 SYSCTL_ADD_PROC(ctx
, SYSCTL_CHILDREN(tree
),
1710 OID_AUTO
, "rx_wreg_nsegs", CTLTYPE_INT
| CTLFLAG_RW
,
1711 sc
, 0, igb_sysctl_rx_wreg_nsegs
, "I",
1712 "# of segments received before write to hardware register");
1714 if (sc
->intr_type
== PCI_INTR_TYPE_MSIX
) {
1715 SYSCTL_ADD_PROC(ctx
, SYSCTL_CHILDREN(tree
),
1716 OID_AUTO
, "tx_msix_cpumap", CTLTYPE_OPAQUE
| CTLFLAG_RD
,
1717 sc
->tx_rmap_intr
, 0, if_ringmap_cpumap_sysctl
, "I",
1718 "TX MSI-X CPU map");
1719 SYSCTL_ADD_PROC(ctx
, SYSCTL_CHILDREN(tree
),
1720 OID_AUTO
, "rx_msix_cpumap", CTLTYPE_OPAQUE
| CTLFLAG_RD
,
1721 sc
->rx_rmap_intr
, 0, if_ringmap_cpumap_sysctl
, "I",
1722 "RX MSI-X CPU map");
1724 #ifdef IFPOLL_ENABLE
1725 SYSCTL_ADD_PROC(ctx
, SYSCTL_CHILDREN(tree
),
1726 OID_AUTO
, "tx_poll_cpumap", CTLTYPE_OPAQUE
| CTLFLAG_RD
,
1727 sc
->tx_rmap
, 0, if_ringmap_cpumap_sysctl
, "I",
1728 "TX polling CPU map");
1729 SYSCTL_ADD_PROC(ctx
, SYSCTL_CHILDREN(tree
),
1730 OID_AUTO
, "rx_poll_cpumap", CTLTYPE_OPAQUE
| CTLFLAG_RD
,
1731 sc
->rx_rmap
, 0, if_ringmap_cpumap_sysctl
, "I",
1732 "RX polling CPU map");
1735 #ifdef IGB_RSS_DEBUG
1736 SYSCTL_ADD_INT(ctx
, SYSCTL_CHILDREN(tree
),
1737 OID_AUTO
, "rss_debug", CTLFLAG_RW
, &sc
->rss_debug
, 0,
1739 for (i
= 0; i
< sc
->rx_ring_cnt
; ++i
) {
1740 ksnprintf(node
, sizeof(node
), "rx%d_pkt", i
);
1741 SYSCTL_ADD_ULONG(ctx
,
1742 SYSCTL_CHILDREN(tree
), OID_AUTO
, node
,
1743 CTLFLAG_RW
, &sc
->rx_rings
[i
].rx_packets
, "RXed packets");
1746 for (i
= 0; i
< sc
->tx_ring_cnt
; ++i
) {
1747 struct igb_tx_ring
*txr
= &sc
->tx_rings
[i
];
1749 #ifdef IGB_TSS_DEBUG
1750 ksnprintf(node
, sizeof(node
), "tx%d_pkt", i
);
1751 SYSCTL_ADD_ULONG(ctx
, SYSCTL_CHILDREN(tree
), OID_AUTO
, node
,
1752 CTLFLAG_RW
, &txr
->tx_packets
, "TXed packets");
1754 ksnprintf(node
, sizeof(node
), "tx%d_nmbuf", i
);
1755 SYSCTL_ADD_INT(ctx
, SYSCTL_CHILDREN(tree
), OID_AUTO
, node
,
1756 CTLFLAG_RD
, &txr
->tx_nmbuf
, 0, "# of pending TX mbufs");
1758 ksnprintf(node
, sizeof(node
), "tx%d_gc", i
);
1759 SYSCTL_ADD_ULONG(ctx
, SYSCTL_CHILDREN(tree
), OID_AUTO
, node
,
1760 CTLFLAG_RW
, &txr
->tx_gc
, "# of TX desc GC");
1763 SYSCTL_ADD_PROC(ctx
, SYSCTL_CHILDREN(tree
),
1764 OID_AUTO
, "dumpreg", CTLTYPE_INT
| CTLFLAG_RW
,
1765 sc
, 0, igb_sysctl_reg_dump
, "I", "dump registers");
1769 igb_alloc_rings(struct igb_softc
*sc
)
1774 * Create top level busdma tag
1776 error
= bus_dma_tag_create(NULL
, 1, 0,
1777 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
, NULL
, NULL
,
1778 BUS_SPACE_MAXSIZE_32BIT
, 0, BUS_SPACE_MAXSIZE_32BIT
, 0,
1781 device_printf(sc
->dev
, "could not create top level DMA tag\n");
1786 * Allocate TX descriptor rings and buffers
1788 sc
->tx_rings
= kmalloc_cachealign(
1789 sizeof(struct igb_tx_ring
) * sc
->tx_ring_cnt
,
1790 M_DEVBUF
, M_WAITOK
| M_ZERO
);
1791 for (i
= 0; i
< sc
->tx_ring_cnt
; ++i
) {
1792 struct igb_tx_ring
*txr
= &sc
->tx_rings
[i
];
1794 /* Set up some basics */
1797 txr
->tx_intr_cpuid
= -1;
1798 lwkt_serialize_init(&txr
->tx_serialize
);
1799 callout_init_mp(&txr
->tx_gc_timer
);
1801 error
= igb_create_tx_ring(txr
);
1807 * Allocate RX descriptor rings and buffers
1809 sc
->rx_rings
= kmalloc_cachealign(
1810 sizeof(struct igb_rx_ring
) * sc
->rx_ring_cnt
,
1811 M_DEVBUF
, M_WAITOK
| M_ZERO
);
1812 for (i
= 0; i
< sc
->rx_ring_cnt
; ++i
) {
1813 struct igb_rx_ring
*rxr
= &sc
->rx_rings
[i
];
1815 /* Set up some basics */
1818 lwkt_serialize_init(&rxr
->rx_serialize
);
1820 error
= igb_create_rx_ring(rxr
);
1829 igb_free_rings(struct igb_softc
*sc
)
1833 if (sc
->tx_rings
!= NULL
) {
1834 for (i
= 0; i
< sc
->tx_ring_cnt
; ++i
) {
1835 struct igb_tx_ring
*txr
= &sc
->tx_rings
[i
];
1837 igb_destroy_tx_ring(txr
, txr
->num_tx_desc
);
1839 kfree(sc
->tx_rings
, M_DEVBUF
);
1842 if (sc
->rx_rings
!= NULL
) {
1843 for (i
= 0; i
< sc
->rx_ring_cnt
; ++i
) {
1844 struct igb_rx_ring
*rxr
= &sc
->rx_rings
[i
];
1846 igb_destroy_rx_ring(rxr
, rxr
->num_rx_desc
);
1848 kfree(sc
->rx_rings
, M_DEVBUF
);
1853 igb_create_tx_ring(struct igb_tx_ring
*txr
)
1855 int tsize
, error
, i
, ntxd
;
1858 * Validate number of transmit descriptors. It must not exceed
1859 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
1861 ntxd
= device_getenv_int(txr
->sc
->dev
, "txd", igb_txd
);
1862 if ((ntxd
* sizeof(struct e1000_tx_desc
)) % IGB_DBA_ALIGN
!= 0 ||
1863 ntxd
> IGB_MAX_TXD
|| ntxd
< IGB_MIN_TXD
) {
1864 device_printf(txr
->sc
->dev
,
1865 "Using %d TX descriptors instead of %d!\n",
1866 IGB_DEFAULT_TXD
, ntxd
);
1867 txr
->num_tx_desc
= IGB_DEFAULT_TXD
;
1869 txr
->num_tx_desc
= ntxd
;
1873 * Allocate TX descriptor ring
1875 tsize
= roundup2(txr
->num_tx_desc
* sizeof(union e1000_adv_tx_desc
),
1877 txr
->txdma
.dma_vaddr
= bus_dmamem_coherent_any(txr
->sc
->parent_tag
,
1878 IGB_DBA_ALIGN
, tsize
, BUS_DMA_WAITOK
,
1879 &txr
->txdma
.dma_tag
, &txr
->txdma
.dma_map
, &txr
->txdma
.dma_paddr
);
1880 if (txr
->txdma
.dma_vaddr
== NULL
) {
1881 device_printf(txr
->sc
->dev
,
1882 "Unable to allocate TX Descriptor memory\n");
1885 txr
->tx_base
= txr
->txdma
.dma_vaddr
;
1886 bzero(txr
->tx_base
, tsize
);
1888 tsize
= __VM_CACHELINE_ALIGN(
1889 sizeof(struct igb_tx_buf
) * txr
->num_tx_desc
);
1890 txr
->tx_buf
= kmalloc_cachealign(tsize
, M_DEVBUF
, M_WAITOK
| M_ZERO
);
1893 * Allocate TX head write-back buffer
1895 txr
->tx_hdr
= bus_dmamem_coherent_any(txr
->sc
->parent_tag
,
1896 __VM_CACHELINE_SIZE
, __VM_CACHELINE_SIZE
, BUS_DMA_WAITOK
,
1897 &txr
->tx_hdr_dtag
, &txr
->tx_hdr_dmap
, &txr
->tx_hdr_paddr
);
1898 if (txr
->tx_hdr
== NULL
) {
1899 device_printf(txr
->sc
->dev
,
1900 "Unable to allocate TX head write-back buffer\n");
1905 * Create DMA tag for TX buffers
1907 error
= bus_dma_tag_create(txr
->sc
->parent_tag
,
1908 1, 0, /* alignment, bounds */
1909 BUS_SPACE_MAXADDR
, /* lowaddr */
1910 BUS_SPACE_MAXADDR
, /* highaddr */
1911 NULL
, NULL
, /* filter, filterarg */
1912 IGB_TSO_SIZE
, /* maxsize */
1913 IGB_MAX_SCATTER
, /* nsegments */
1914 PAGE_SIZE
, /* maxsegsize */
1915 BUS_DMA_WAITOK
| BUS_DMA_ALLOCNOW
|
1916 BUS_DMA_ONEBPAGE
, /* flags */
1919 device_printf(txr
->sc
->dev
, "Unable to allocate TX DMA tag\n");
1920 kfree(txr
->tx_buf
, M_DEVBUF
);
1926 * Create DMA maps for TX buffers
1928 for (i
= 0; i
< txr
->num_tx_desc
; ++i
) {
1929 struct igb_tx_buf
*txbuf
= &txr
->tx_buf
[i
];
1931 error
= bus_dmamap_create(txr
->tx_tag
,
1932 BUS_DMA_WAITOK
| BUS_DMA_ONEBPAGE
, &txbuf
->map
);
1934 device_printf(txr
->sc
->dev
,
1935 "Unable to create TX DMA map\n");
1936 igb_destroy_tx_ring(txr
, i
);
1941 if (txr
->sc
->hw
.mac
.type
== e1000_82575
)
1942 txr
->tx_flags
|= IGB_TXFLAG_TSO_IPLEN0
;
1945 * Initialize various watermark
1947 if (txr
->sc
->hw
.mac
.type
== e1000_82575
) {
1949 * There no ways to GC pending TX mbufs in 'header
1950 * write back' mode with reduced # of RS TX descs,
1951 * since TDH does _not_ move for 82575.
1953 txr
->intr_nsegs
= 1;
1955 txr
->intr_nsegs
= txr
->num_tx_desc
/ 16;
1957 txr
->wreg_nsegs
= IGB_DEF_TXWREG_NSEGS
;
1963 igb_free_tx_ring(struct igb_tx_ring
*txr
)
1967 for (i
= 0; i
< txr
->num_tx_desc
; ++i
) {
1968 struct igb_tx_buf
*txbuf
= &txr
->tx_buf
[i
];
1970 if (txbuf
->m_head
!= NULL
)
1971 igb_free_txbuf(txr
, txbuf
);
1976 igb_destroy_tx_ring(struct igb_tx_ring
*txr
, int ndesc
)
1980 if (txr
->txdma
.dma_vaddr
!= NULL
) {
1981 bus_dmamap_unload(txr
->txdma
.dma_tag
, txr
->txdma
.dma_map
);
1982 bus_dmamem_free(txr
->txdma
.dma_tag
, txr
->txdma
.dma_vaddr
,
1983 txr
->txdma
.dma_map
);
1984 bus_dma_tag_destroy(txr
->txdma
.dma_tag
);
1985 txr
->txdma
.dma_vaddr
= NULL
;
1988 if (txr
->tx_hdr
!= NULL
) {
1989 bus_dmamap_unload(txr
->tx_hdr_dtag
, txr
->tx_hdr_dmap
);
1990 bus_dmamem_free(txr
->tx_hdr_dtag
, txr
->tx_hdr
,
1992 bus_dma_tag_destroy(txr
->tx_hdr_dtag
);
1996 if (txr
->tx_buf
== NULL
)
1999 for (i
= 0; i
< ndesc
; ++i
) {
2000 struct igb_tx_buf
*txbuf
= &txr
->tx_buf
[i
];
2002 KKASSERT(txbuf
->m_head
== NULL
);
2003 bus_dmamap_destroy(txr
->tx_tag
, txbuf
->map
);
2005 bus_dma_tag_destroy(txr
->tx_tag
);
2007 kfree(txr
->tx_buf
, M_DEVBUF
);
2012 igb_init_tx_ring(struct igb_tx_ring
*txr
)
2014 /* Clear the old descriptor contents */
2016 sizeof(union e1000_adv_tx_desc
) * txr
->num_tx_desc
);
2018 /* Clear TX head write-back buffer */
2022 txr
->next_avail_desc
= 0;
2023 txr
->next_to_clean
= 0;
2025 txr
->tx_running
= 0;
2028 /* Set number of descriptors available */
2029 txr
->tx_avail
= txr
->num_tx_desc
;
2031 /* Enable this TX ring */
2032 txr
->tx_flags
|= IGB_TXFLAG_ENABLED
;
2036 igb_init_tx_unit(struct igb_softc
*sc
)
2038 struct e1000_hw
*hw
= &sc
->hw
;
2042 /* Setup the Tx Descriptor Rings */
2043 for (i
= 0; i
< sc
->tx_ring_inuse
; ++i
) {
2044 struct igb_tx_ring
*txr
= &sc
->tx_rings
[i
];
2045 uint64_t bus_addr
= txr
->txdma
.dma_paddr
;
2046 uint64_t hdr_paddr
= txr
->tx_hdr_paddr
;
2047 uint32_t txdctl
= 0;
2048 uint32_t dca_txctrl
;
2050 E1000_WRITE_REG(hw
, E1000_TDLEN(i
),
2051 txr
->num_tx_desc
* sizeof(struct e1000_tx_desc
));
2052 E1000_WRITE_REG(hw
, E1000_TDBAH(i
),
2053 (uint32_t)(bus_addr
>> 32));
2054 E1000_WRITE_REG(hw
, E1000_TDBAL(i
),
2055 (uint32_t)bus_addr
);
2057 /* Setup the HW Tx Head and Tail descriptor pointers */
2058 E1000_WRITE_REG(hw
, E1000_TDT(i
), 0);
2059 E1000_WRITE_REG(hw
, E1000_TDH(i
), 0);
2061 dca_txctrl
= E1000_READ_REG(hw
, E1000_DCA_TXCTRL(i
));
2062 dca_txctrl
&= ~E1000_DCA_TXCTRL_TX_WB_RO_EN
;
2063 E1000_WRITE_REG(hw
, E1000_DCA_TXCTRL(i
), dca_txctrl
);
2066 * Don't set WB_on_EITR:
2067 * - 82575 does not have it
2068 * - It almost has no effect on 82576, see:
2069 * 82576 specification update errata #26
2070 * - It causes unnecessary bus traffic
2072 E1000_WRITE_REG(hw
, E1000_TDWBAH(i
),
2073 (uint32_t)(hdr_paddr
>> 32));
2074 E1000_WRITE_REG(hw
, E1000_TDWBAL(i
),
2075 ((uint32_t)hdr_paddr
) | E1000_TX_HEAD_WB_ENABLE
);
2078 * WTHRESH is ignored by the hardware, since header
2079 * write back mode is used.
2081 txdctl
|= IGB_TX_PTHRESH
;
2082 txdctl
|= IGB_TX_HTHRESH
<< 8;
2083 txdctl
|= IGB_TX_WTHRESH
<< 16;
2084 txdctl
|= E1000_TXDCTL_QUEUE_ENABLE
;
2085 E1000_WRITE_REG(hw
, E1000_TXDCTL(i
), txdctl
);
2091 e1000_config_collision_dist(hw
);
2093 /* Program the Transmit Control Register */
2094 tctl
= E1000_READ_REG(hw
, E1000_TCTL
);
2095 tctl
&= ~E1000_TCTL_CT
;
2096 tctl
|= (E1000_TCTL_PSP
| E1000_TCTL_RTLC
| E1000_TCTL_EN
|
2097 (E1000_COLLISION_THRESHOLD
<< E1000_CT_SHIFT
));
2099 /* This write will effectively turn on the transmit unit. */
2100 E1000_WRITE_REG(hw
, E1000_TCTL
, tctl
);
2104 igb_txcsum_ctx(struct igb_tx_ring
*txr
, struct mbuf
*mp
)
2106 struct e1000_adv_tx_context_desc
*TXD
;
2107 uint32_t vlan_macip_lens
, type_tucmd_mlhl
, mss_l4len_idx
;
2108 int ehdrlen
, ctxd
, ip_hlen
= 0;
2109 boolean_t offload
= TRUE
;
2111 if ((mp
->m_pkthdr
.csum_flags
& IGB_CSUM_FEATURES
) == 0)
2114 vlan_macip_lens
= type_tucmd_mlhl
= mss_l4len_idx
= 0;
2116 ctxd
= txr
->next_avail_desc
;
2117 TXD
= (struct e1000_adv_tx_context_desc
*)&txr
->tx_base
[ctxd
];
2120 * In advanced descriptors the vlan tag must
2121 * be placed into the context descriptor, thus
2122 * we need to be here just for that setup.
2124 if (mp
->m_flags
& M_VLANTAG
) {
2127 vlantag
= htole16(mp
->m_pkthdr
.ether_vlantag
);
2128 vlan_macip_lens
|= (vlantag
<< E1000_ADVTXD_VLAN_SHIFT
);
2129 } else if (!offload
) {
2133 ehdrlen
= mp
->m_pkthdr
.csum_lhlen
;
2134 KASSERT(ehdrlen
> 0, ("invalid ether hlen"));
2136 /* Set the ether header length */
2137 vlan_macip_lens
|= ehdrlen
<< E1000_ADVTXD_MACLEN_SHIFT
;
2138 if (mp
->m_pkthdr
.csum_flags
& CSUM_IP
) {
2139 type_tucmd_mlhl
|= E1000_ADVTXD_TUCMD_IPV4
;
2140 ip_hlen
= mp
->m_pkthdr
.csum_iphlen
;
2141 KASSERT(ip_hlen
> 0, ("invalid ip hlen"));
2143 vlan_macip_lens
|= ip_hlen
;
2145 type_tucmd_mlhl
|= E1000_ADVTXD_DCMD_DEXT
| E1000_ADVTXD_DTYP_CTXT
;
2146 if (mp
->m_pkthdr
.csum_flags
& CSUM_TCP
)
2147 type_tucmd_mlhl
|= E1000_ADVTXD_TUCMD_L4T_TCP
;
2148 else if (mp
->m_pkthdr
.csum_flags
& CSUM_UDP
)
2149 type_tucmd_mlhl
|= E1000_ADVTXD_TUCMD_L4T_UDP
;
2152 * 82575 needs the TX context index added; the queue
2153 * index is used as TX context index here.
2155 if (txr
->sc
->hw
.mac
.type
== e1000_82575
)
2156 mss_l4len_idx
= txr
->me
<< 4;
2158 /* Now copy bits into descriptor */
2159 TXD
->vlan_macip_lens
= htole32(vlan_macip_lens
);
2160 TXD
->type_tucmd_mlhl
= htole32(type_tucmd_mlhl
);
2161 TXD
->seqnum_seed
= htole32(0);
2162 TXD
->mss_l4len_idx
= htole32(mss_l4len_idx
);
2164 /* We've consumed the first desc, adjust counters */
2165 if (++ctxd
== txr
->num_tx_desc
)
2167 txr
->next_avail_desc
= ctxd
;
static void
igb_txeof(struct igb_tx_ring *txr, int hdr)
{
	int first, avail;

	if (txr->tx_avail == txr->num_tx_desc)
		return;

	first = txr->next_to_clean;

	avail = txr->tx_avail;
	while (first != hdr) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[first];

		KKASSERT(avail < txr->num_tx_desc);
		++avail;

		igb_free_txbuf(txr, txbuf);

		if (++first == txr->num_tx_desc)
			first = 0;
	}
	txr->next_to_clean = first;
	txr->tx_avail = avail;

	/*
	 * If we have a minimum free, clear OACTIVE
	 * to tell the stack that it is OK to send packets.
	 */
	if (txr->tx_avail > IGB_MAX_SCATTER + IGB_TX_RESERVED) {
		ifsq_clr_oactive(txr->ifsq);

		/*
		 * We have enough TX descriptors, turn off
		 * the watchdog.  We allow small amount of
		 * packets (roughly intr_nsegs) pending on
		 * the transmit ring.
		 */
		txr->tx_watchdog.wd_timer = 0;
	}
	txr->tx_running = IGB_TX_RUNNING;
}
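
/*
 * Garbage-collect completed TX buffers; unlike igb_txeof(), this
 * reads the hardware TDH register directly instead of the write-back
 * header.
 */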
static void
igb_txgc(struct igb_tx_ring *txr)
{
	int first, hdr, avail;

	if (txr->tx_avail == txr->num_tx_desc)
		return;

	hdr = E1000_READ_REG(&txr->sc->hw, E1000_TDH(txr->me));
	first = txr->next_to_clean;

	avail = txr->tx_avail;

	while (first != hdr) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[first];

		KKASSERT(avail < txr->num_tx_desc);
		++avail;

		igb_free_txbuf(txr, txbuf);

		if (++first == txr->num_tx_desc)
			first = 0;
	}

	txr->tx_running = IGB_TX_RUNNING;
}
static int
igb_create_rx_ring(struct igb_rx_ring *rxr)
{
	int rsize, i, error, nrxd;

	/*
	 * Validate number of receive descriptors.  It must not exceed
	 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
	 */
	nrxd = device_getenv_int(rxr->sc->dev, "rxd", igb_rxd);
	if ((nrxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN != 0 ||
	    nrxd > IGB_MAX_RXD || nrxd < IGB_MIN_RXD) {
		device_printf(rxr->sc->dev,
		    "Using %d RX descriptors instead of %d!\n",
		    IGB_DEFAULT_RXD, nrxd);
		rxr->num_rx_desc = IGB_DEFAULT_RXD;
	} else {
		rxr->num_rx_desc = nrxd;
	}

	/*
	 * Allocate RX descriptor ring
	 */
	rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc),
	    IGB_DBA_ALIGN);
	rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag,
	    IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
	    &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map,
	    &rxr->rxdma.dma_paddr);
	if (rxr->rxdma.dma_vaddr == NULL) {
		device_printf(rxr->sc->dev,
		    "Unable to allocate RxDescriptor memory\n");
		return ENOMEM;
	}
	rxr->rx_base = rxr->rxdma.dma_vaddr;
	bzero(rxr->rx_base, rsize);

	rsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_rx_buf) * rxr->num_rx_desc);
	rxr->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create DMA tag for RX buffers
	 */
	error = bus_dma_tag_create(rxr->sc->parent_tag,
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
	    &rxr->rx_tag);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create RX payload DMA tag\n");
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create spare DMA map for RX buffers
	 */
	error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK,
	    &rxr->rx_sparemap);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create spare RX DMA maps\n");
		bus_dma_tag_destroy(rxr->rx_tag);
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX buffers
	 */
	for (i = 0; i < rxr->num_rx_desc; i++) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		error = bus_dmamap_create(rxr->rx_tag,
		    BUS_DMA_WAITOK, &rxbuf->map);
		if (error) {
			device_printf(rxr->sc->dev,
			    "Unable to create RX DMA maps\n");
			igb_destroy_rx_ring(rxr, i);
			return error;
		}
	}

	/*
	 * Initialize various watermarks
	 */
	rxr->wreg_nsegs = IGB_DEF_RXWREG_NSEGS;

	return 0;
}
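
/*
 * Unload and free all mbufs currently attached to the RX ring,
 * including any partially assembled frame tracked by fmp/lmp.
 */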
static void
igb_free_rx_ring(struct igb_rx_ring *rxr)
{
	int i;

	for (i = 0; i < rxr->num_rx_desc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		if (rxbuf->m_head != NULL) {
			bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
			m_freem(rxbuf->m_head);
			rxbuf->m_head = NULL;
		}
	}

	if (rxr->fmp != NULL)
		m_freem(rxr->fmp);
	rxr->fmp = NULL;
	rxr->lmp = NULL;
}
static void
igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc)
{
	int i;

	if (rxr->rxdma.dma_vaddr != NULL) {
		bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map);
		bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr,
		    rxr->rxdma.dma_map);
		bus_dma_tag_destroy(rxr->rxdma.dma_tag);
		rxr->rxdma.dma_vaddr = NULL;
	}

	if (rxr->rx_buf == NULL)
		return;

	for (i = 0; i < ndesc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		KKASSERT(rxbuf->m_head == NULL);
		bus_dmamap_destroy(rxr->rx_tag, rxbuf->map);
	}
	bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap);
	bus_dma_tag_destroy(rxr->rx_tag);

	kfree(rxr->rx_buf, M_DEVBUF);
	rxr->rx_buf = NULL;
}
static __inline void
igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf)
{
	rxd->read.pkt_addr = htole64(rxbuf->paddr);
	rxd->wb.upper.status_error = 0;
}
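
/*
 * Attach a fresh mbuf cluster to RX slot 'i'.  The cluster is loaded
 * through the ring's spare DMA map first and the maps are swapped
 * afterwards, so the slot keeps its old mbuf mapped if loading fails.
 */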
static int
igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait)
{
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct igb_rx_buf *rxbuf;
	struct mbuf *m;
	int error, nseg;

	m = m_getcl(wait ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		if (wait) {
			if_printf(&rxr->sc->arpcom.ac_if,
			    "Unable to allocate RX mbuf\n");
		}
		return ENOBUFS;
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(rxr->rx_tag,
	    rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (wait) {
			if_printf(&rxr->sc->arpcom.ac_if,
			    "Unable to load RX mbuf\n");
		}
		return error;
	}

	rxbuf = &rxr->rx_buf[i];
	if (rxbuf->m_head != NULL)
		bus_dmamap_unload(rxr->rx_tag, rxbuf->map);

	map = rxbuf->map;
	rxbuf->map = rxr->rx_sparemap;
	rxr->rx_sparemap = map;

	rxbuf->m_head = m;
	rxbuf->paddr = seg.ds_addr;

	igb_setup_rxdesc(&rxr->rx_base[i], rxbuf);
	return 0;
}
static int
igb_init_rx_ring(struct igb_rx_ring *rxr)
{
	int i, error;

	/* Clear the ring contents */
	bzero(rxr->rx_base,
	    rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc));

	/* Now replenish the ring mbufs */
	for (i = 0; i < rxr->num_rx_desc; ++i) {
		error = igb_newbuf(rxr, i, TRUE);
		if (error)
			return error;
	}

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;

	rxr->fmp = NULL;
	rxr->lmp = NULL;
	rxr->discard = FALSE;

	return 0;
}
static void
igb_init_rx_unit(struct igb_softc *sc, boolean_t polling)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t rctl, rxcsum, srrctl = 0;
	int i;

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	/*
	 * Set up for header split
	 */
	if (igb_header_split) {
		/* Use a standard mbuf for the header */
		srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}

	/*
	 * Set up for jumbo frames
	 */
	if (ifp->if_mtu > ETHERMTU) {
		rctl |= E1000_RCTL_LPE;
#if 0
		if (adapter->rx_mbuf_sz == MJUMPAGESIZE) {
			srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
			rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
		} else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) {
			srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
			rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
		}
		/* Set maximum packet len */
		psize = adapter->max_frame_size;
		/* are we on a vlan? */
		if (adapter->ifp->if_vlantrunk != NULL)
			psize += VLAN_TAG_SIZE;
		E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
#else
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
#endif
	} else {
		rctl &= ~E1000_RCTL_LPE;
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
	}

	/*
	 * If TX flow control is disabled and more than 1 RX ring
	 * is enabled, enable DROP.
	 *
	 * This drops frames rather than hanging the RX MAC for all
	 * RX rings.
	 */
	if (sc->rx_ring_inuse > 1 &&
	    (sc->ifm_flowctrl & IFM_ETH_TXPAUSE) == 0) {
		srrctl |= E1000_SRRCTL_DROP_EN;
		if (bootverbose)
			if_printf(ifp, "enable RX drop\n");
	}

	/* Setup the Base and Length of the Rx Descriptor Rings */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];
		uint64_t bus_addr = rxr->rxdma.dma_paddr;
		uint32_t rxdctl;

		E1000_WRITE_REG(hw, E1000_RDLEN(i),
		    rxr->num_rx_desc * sizeof(struct e1000_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i),
		    (uint32_t)bus_addr);
		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
		/* Enable this Queue */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= IGB_RX_PTHRESH;
		rxdctl |= IGB_RX_HTHRESH << 8;
		/*
		 * Don't set WTHRESH to a value above 1 on 82576, see:
		 * 82576 specification update errata #26
		 */
		rxdctl |= IGB_RX_WTHRESH << 16;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
	rxcsum &= ~(E1000_RXCSUM_PCSS_MASK | E1000_RXCSUM_IPPCSE);

	/*
	 * Receive Checksum Offload for TCP and UDP
	 *
	 * Checksum offloading is also enabled if multiple receive
	 * queue is to be supported, since we need it to figure out
	 * the RSS hash.
	 */
	if ((ifp->if_capenable & IFCAP_RXCSUM) || IGB_ENABLE_HWRSS(sc)) {
		/*
		 * NOTE:
		 * PCSD must be enabled to enable multiple
		 * receive queues.
		 */
		rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
		    E1000_RXCSUM_PCSD;
	} else {
		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
		    E1000_RXCSUM_PCSD);
	}
	E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);

	if (sc->rx_ring_inuse > 1) {
		uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE];
		const struct if_ringmap *rm;
		uint32_t reta_shift;
		int j, r;

		/*
		 * NOTE:
		 * When we reach here, RSS has already been disabled
		 * in igb_stop(), so we could safely configure RSS key
		 * and redirect table.
		 */

		/*
		 * Configure RSS key
		 */
		toeplitz_get_key(key, sizeof(key));
		for (i = 0; i < IGB_NRSSRK; ++i) {
			uint32_t rssrk;

			rssrk = IGB_RSSRK_VAL(key, i);
			IGB_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);

			E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk);
		}

		/*
		 * Configure RSS redirect table
		 */
		if (polling)
			rm = sc->rx_rmap;
		else
			rm = sc->rx_rmap_intr;
		if_ringmap_rdrtable(rm, sc->rdr_table, IGB_RDRTABLE_SIZE);

		reta_shift = IGB_RETA_SHIFT;
		if (hw->mac.type == e1000_82575)
			reta_shift = IGB_RETA_SHIFT_82575;

		r = 0;
		for (j = 0; j < IGB_NRETA; ++j) {
			uint32_t reta = 0;

			for (i = 0; i < IGB_RETA_SIZE; ++i) {
				uint32_t q;

				q = sc->rdr_table[r] << reta_shift;
				reta |= q << (8 * i);
				++r;
			}
			IGB_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
			E1000_WRITE_REG(hw, E1000_RETA(j), reta);
		}

		/*
		 * Enable multiple receive queues.
		 * Enable IPv4 RSS standard hash functions.
		 * Disable RSS interrupt on 82575
		 */
		E1000_WRITE_REG(&sc->hw, E1000_MRQC,
		    E1000_MRQC_ENABLE_RSS_4Q |
		    E1000_MRQC_RSS_FIELD_IPV4_TCP |
		    E1000_MRQC_RSS_FIELD_IPV4);
	}

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
	    E1000_RCTL_RDMTS_HALF |
	    (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
	/* Strip CRC bytes. */
	rctl |= E1000_RCTL_SECRC;
	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Receives */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers
	 *   - needs to be after enable
	 */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1);
	}
}
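
/*
 * Publish refilled RX descriptors to the hardware.  RDT must point at
 * the last valid descriptor, i.e. one before the next descriptor to
 * be checked, hence the decrement below.
 */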
static __inline void
igb_rx_refresh(struct igb_rx_ring *rxr, int i)
{
	if (--i < 0)
		i = rxr->num_rx_desc - 1;
	E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i);
}
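
/*
 * RX completion: walk descriptors whose DD bit is set, chain
 * multi-descriptor frames through fmp/lmp, and hand completed frames
 * to ifp->if_input().  RDT is updated every wreg_nsegs descriptors to
 * batch the doorbell writes.
 */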
static void
igb_rxeof(struct igb_rx_ring *rxr, int count)
{
	struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
	union e1000_adv_rx_desc *cur;
	uint32_t staterr;
	int i, ncoll = 0, cpuid = mycpuid;

	i = rxr->next_to_check;
	cur = &rxr->rx_base[i];
	staterr = le32toh(cur->wb.upper.status_error);

	if ((staterr & E1000_RXD_STAT_DD) == 0)
		return;

	while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
		struct pktinfo *pi = NULL, pi0;
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];
		struct mbuf *m = NULL;
		boolean_t eop;

		eop = (staterr & E1000_RXD_STAT_EOP) ? TRUE : FALSE;
		if (eop)
			--count;

		++ncoll;
		if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 &&
		    !rxr->discard) {
			struct mbuf *mp = rxbuf->m_head;
			uint32_t hash, hashtype;
			uint16_t vlan;
			int len;

			len = le16toh(cur->wb.upper.length);
			if ((rxr->sc->hw.mac.type == e1000_i350 ||
			     rxr->sc->hw.mac.type == e1000_i354) &&
			    (staterr & E1000_RXDEXT_STATERR_LB))
				vlan = be16toh(cur->wb.upper.vlan);
			else
				vlan = le16toh(cur->wb.upper.vlan);

			hash = le32toh(cur->wb.lower.hi_dword.rss);
			hashtype = le32toh(cur->wb.lower.lo_dword.data) &
			    E1000_RXDADV_RSSTYPE_MASK;

			IGB_RSS_DPRINTF(rxr->sc, 10,
			    "ring%d, hash 0x%08x, hashtype %u\n",
			    rxr->me, hash, hashtype);

			bus_dmamap_sync(rxr->rx_tag, rxbuf->map,
			    BUS_DMASYNC_POSTREAD);

			if (igb_newbuf(rxr, i, FALSE) != 0) {
				IFNET_STAT_INC(ifp, iqdrops, 1);
				goto discard;
			}

			mp->m_len = len;
			if (rxr->fmp == NULL) {
				mp->m_pkthdr.len = len;
				rxr->fmp = mp;
				rxr->lmp = mp;
			} else {
				rxr->lmp->m_next = mp;
				rxr->lmp = rxr->lmp->m_next;
				rxr->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				m = rxr->fmp;
				rxr->fmp = NULL;
				rxr->lmp = NULL;

				m->m_pkthdr.rcvif = ifp;
				IFNET_STAT_INC(ifp, ipackets, 1);

				if (ifp->if_capenable & IFCAP_RXCSUM)
					igb_rxcsum(staterr, m);

				if (staterr & E1000_RXD_STAT_VP) {
					m->m_pkthdr.ether_vlantag = vlan;
					m->m_flags |= M_VLANTAG;
				}

				if (ifp->if_capenable & IFCAP_RSS) {
					pi = igb_rssinfo(m, &pi0,
					    hash, hashtype, staterr);
				}
#ifdef IGB_RSS_DEBUG
				rxr->rx_packets++;
#endif
			}
		} else {
			IFNET_STAT_INC(ifp, ierrors, 1);
discard:
			igb_setup_rxdesc(cur, rxbuf);
			if (!eop)
				rxr->discard = TRUE;
			else
				rxr->discard = FALSE;
			if (rxr->fmp != NULL) {
				m_freem(rxr->fmp);
				rxr->fmp = NULL;
				rxr->lmp = NULL;
			}
			m = NULL;
		}

		if (m != NULL)
			ifp->if_input(ifp, m, pi, cpuid);

		/* Advance our pointers to the next descriptor. */
		if (++i == rxr->num_rx_desc)
			i = 0;

		if (ncoll >= rxr->wreg_nsegs) {
			igb_rx_refresh(rxr, i);
			ncoll = 0;
		}

		cur = &rxr->rx_base[i];
		staterr = le32toh(cur->wb.upper.status_error);
	}
	rxr->next_to_check = i;

	if (ncoll > 0)
		igb_rx_refresh(rxr, i);
}
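
/*
 * Enable hardware VLAN tag stripping (CTRL.VME) and widen RLPML to
 * account for the VLAN tag; the VLAN filter table code below is
 * currently compiled out.
 */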
static void
igb_set_vlan(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;
#if 0
	struct ifnet *ifp = &sc->arpcom.ac_if;
#endif

	if (sc->vf_ifp) {
		e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE);
		return;
	}

	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

#if 0
	/* Enable the Filter Table */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		reg = E1000_READ_REG(hw, E1000_RCTL);
		reg &= ~E1000_RCTL_CFIEN;
		reg |= E1000_RCTL_VFE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	}
#endif

	/* Update the frame size */
	E1000_WRITE_REG(&sc->hw, E1000_RLPML,
	    sc->max_frame_size + VLAN_TAG_SIZE);

#if 0
	/* Don't bother with table if no vlans */
	if ((adapter->num_vlans == 0) ||
	    ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0))
		return;
	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IGB_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0) {
			if (adapter->vf_ifp)
				e1000_vfta_set_vf(hw,
				    adapter->shadow_vfta[i], TRUE);
			else
				E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
				    i, adapter->shadow_vfta[i]);
		}
#endif
}
static void
igb_enable_intr(struct igb_softc *sc)
{
	int i;

	for (i = 0; i < sc->intr_cnt; ++i)
		lwkt_serialize_handler_enable(sc->intr_data[i].intr_serialize);

	if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) {
		if (sc->intr_type == PCI_INTR_TYPE_MSIX)
			E1000_WRITE_REG(&sc->hw, E1000_EIAC, sc->intr_mask);
		else
			E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0);
		E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask);
		E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
		E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC);
	} else {
		E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
	}
	E1000_WRITE_FLUSH(&sc->hw);
}
static void
igb_disable_intr(struct igb_softc *sc)
{
	int i;

	if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) {
		E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff);
		E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0);
	}
	E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
	E1000_WRITE_FLUSH(&sc->hw);

	for (i = 0; i < sc->intr_cnt; ++i)
		lwkt_serialize_handler_disable(sc->intr_data[i].intr_serialize);
}
/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system... aka
 * to disable special hardware management features.
 */
static void
igb_get_mgmt(struct igb_softc *sc)
{
	if (sc->flags & IGB_FLAG_HAS_MGMT) {
		int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
		int manc = E1000_READ_REG(&sc->hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~E1000_MANC_ARP_EN;

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
		manc2h |= 1 << 5; /* Mng Port 623 */
		manc2h |= 1 << 6; /* Mng Port 664 */
		E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
		E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
	}
}
/*
 * Give control back to hardware management controller
 */
static void
igb_rel_mgmt(struct igb_softc *sc)
{
	if (sc->flags & IGB_FLAG_HAS_MGMT) {
		int manc = E1000_READ_REG(&sc->hw, E1000_MANC);

		/* Re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
	}
}
/*
 * Sets CTRL_EXT:DRV_LOAD bit.
 *
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void
igb_get_hw_control(struct igb_softc *sc)
{
	uint32_t ctrl_ext;

	if (sc->vf_ifp)
		return;

	/* Let firmware know the driver has taken over */
	ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
	    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
/*
 * Resets CTRL_EXT:DRV_LOAD bit.
 *
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igb_rel_hw_control(struct igb_softc *sc)
{
	uint32_t ctrl_ext;

	if (sc->vf_ifp)
		return;

	/* Let firmware take over control of h/w */
	ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
	    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}
static boolean_t
igb_is_valid_ether_addr(const uint8_t *addr)
{
	uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
		return FALSE;
	return TRUE;
}
/*
 * Enable PCI Wake On LAN capability
 */
static void
igb_enable_wol(struct igb_softc *sc)
{
	device_t dev = sc->dev;
	uint32_t ctrl, ctrl_ext;
	uint16_t status;
	int error = 0, pmc;

	if (pci_find_extcap(dev, PCIY_PMG, &pmc) != 0) {
		device_printf(dev, "no PMG\n");
		return;
	}

	/*
	 * Set the type of wakeup.
	 */
	sc->wol &= ~(E1000_WUFC_EX | E1000_WUFC_MC);
	if ((sc->wol & (E1000_WUFC_EX | E1000_WUFC_MAG | E1000_WUFC_MC)) == 0)
		return;

	/*
	 * Advertise the wakeup capabilities.
	 */
	ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
	ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
	E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);

	/*
	 * Keep the laser running on Fiber adapters.
	 */
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, ctrl_ext);
	}

	error = igb_enable_phy_wol(sc);

	/* XXX will this happen? ich/pch specific. */
	if (sc->hw.phy.type == e1000_phy_igp_3)
		e1000_igp3_phy_powerdown_workaround_ich8lan(&sc->hw);

	status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
	status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if (!error)
		status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
}
/*
 * WOL in the newer chipset interfaces (pchlan)
 * requires things to be copied into the phy
 */
static int
igb_enable_phy_wol(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t mreg;
	uint16_t preg;
	int ret = 0, i;

	/* Copy MAC RARs to PHY RARs */
	e1000_copy_rx_addrs_to_phy_ich8lan(hw);

	/* Copy MAC MTA to PHY MTA */
	for (i = 0; i < hw->mac.mta_reg_count; i++) {
		mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		e1000_write_phy_reg(hw, BM_MTA(i), (uint16_t)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_MTA(i) + 1,
		    (uint16_t)((mreg >> 16) & 0xFFFF));
	}

	/* Configure PHY Rx Control register */
	e1000_read_phy_reg(hw, BM_RCTL, &preg);
	mreg = E1000_READ_REG(hw, E1000_RCTL);
	if (mreg & E1000_RCTL_UPE)
		preg |= BM_RCTL_UPE;
	if (mreg & E1000_RCTL_MPE)
		preg |= BM_RCTL_MPE;
	preg &= ~(BM_RCTL_MO_MASK);
	if (mreg & E1000_RCTL_MO_3) {
		preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
		    << BM_RCTL_MO_SHIFT);
	}
	if (mreg & E1000_RCTL_BAM)
		preg |= BM_RCTL_BAM;
	if (mreg & E1000_RCTL_PMCF)
		preg |= BM_RCTL_PMCF;
	mreg = E1000_READ_REG(hw, E1000_CTRL);
	if (mreg & E1000_CTRL_RFCE)
		preg |= BM_RCTL_RFCE;
	e1000_write_phy_reg(&sc->hw, BM_RCTL, preg);

	/* Enable PHY wakeup in MAC register. */
	E1000_WRITE_REG(hw, E1000_WUC,
	    E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN | E1000_WUC_APME);
	E1000_WRITE_REG(hw, E1000_WUFC, sc->wol);

	/* Configure and enable PHY wakeup in PHY registers */
	e1000_write_phy_reg(hw, BM_WUFC, sc->wol);
	e1000_write_phy_reg(hw, BM_WUC, E1000_WUC_PME_EN);

	/* Activate PHY wakeup */
	ret = hw->phy.ops.acquire(hw);
	if (ret) {
		if_printf(&sc->arpcom.ac_if, "Could not acquire PHY\n");
		return ret;
	}

	e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
	    (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
	ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
	if (ret) {
		if_printf(&sc->arpcom.ac_if, "Could not read PHY page 769\n");
		goto out;
	}
	preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
	if (ret) {
		if_printf(&sc->arpcom.ac_if,
		    "Could not set PHY Host Wakeup bit\n");
	}
out:
	hw->phy.ops.release(hw);

	return ret;
}
static void
igb_update_stats_counters(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct e1000_hw_stats *stats;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * The virtual function adapter has only a
	 * small controlled set of stats, do only
	 * those and return.
	 */
	if (sc->vf_ifp) {
		igb_update_vf_stats_counters(sc);
		return;
	}
	stats = sc->stats;

	if (sc->hw.phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += sc->pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	stats->gorc += E1000_READ_REG(hw, E1000_GORCL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
	stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);

	stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
	stats->ruc += E1000_READ_REG(hw, E1000_RUC);
	stats->rfc += E1000_READ_REG(hw, E1000_RFC);
	stats->roc += E1000_READ_REG(hw, E1000_ROC);
	stats->rjc += E1000_READ_REG(hw, E1000_RJC);

	stats->mgprc += E1000_READ_REG(hw, E1000_MGTPRC);
	stats->mgpdc += E1000_READ_REG(hw, E1000_MGTPDC);
	stats->mgptc += E1000_READ_REG(hw, E1000_MGTPTC);

	stats->tor += E1000_READ_REG(hw, E1000_TORL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
	stats->tot += E1000_READ_REG(hw, E1000_TOTL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);

	stats->tpr += E1000_READ_REG(hw, E1000_TPR);
	stats->tpt += E1000_READ_REG(hw, E1000_TPT);
	stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
	stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
	stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
	stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
	stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
	stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
	stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
	stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

	/* Interrupt Counts */

	stats->iac += E1000_READ_REG(hw, E1000_IAC);
	stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
	stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
	stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
	stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
	stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
	stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
	stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
	stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);

	/* Host to Card Statistics */

	stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
	stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
	stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
	stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
	stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
	stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
	stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
	stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32));
	stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32));
	stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
	stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
	stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);

	stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
	stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
	stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
	stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
	stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
	stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);

	IFNET_STAT_SET(ifp, collisions, stats->colc);

	/* Rx Errors */
	IFNET_STAT_SET(ifp, ierrors,
	    stats->rxerrc + stats->crcerrs + stats->algnerrc +
	    stats->ruc + stats->roc + stats->mpc + stats->cexterr);

	/* Tx Errors */
	IFNET_STAT_SET(ifp, oerrors,
	    stats->ecol + stats->latecol + sc->watchdog_events);

	/* Driver specific counters */
	sc->device_control = E1000_READ_REG(hw, E1000_CTRL);
	sc->rx_control = E1000_READ_REG(hw, E1000_RCTL);
	sc->int_mask = E1000_READ_REG(hw, E1000_IMS);
	sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS);
	sc->packet_buf_alloc_tx =
	    ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16);
	sc->packet_buf_alloc_rx =
	    (E1000_READ_REG(hw, E1000_PBA) & 0xffff);
}
static void
igb_vf_init_stats(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct e1000_vf_stats *stats;

	stats = sc->stats;
	stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC);
	stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC);
	stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC);
	stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC);
	stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC);
}
static void
igb_update_vf_stats_counters(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct e1000_vf_stats *stats;

	if (sc->link_speed == 0)
		return;
	stats = sc->stats;

	UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc);
	UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc);
	UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc);
	UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc);
	UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc);
}
#ifdef IFPOLL_ENABLE

static void
igb_npoll_status(struct ifnet *ifp)
{
	struct igb_softc *sc = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		sc->hw.mac.get_link_status = 1;
		igb_update_link_status(sc);
	}
}

static void
igb_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
{
	struct igb_tx_ring *txr = arg;

	ASSERT_SERIALIZED(&txr->tx_serialize);
	igb_tx_intr(txr, *(txr->tx_hdr));
	igb_try_txgc(txr, 1);
}

static void
igb_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
{
	struct igb_rx_ring *rxr = arg;

	ASSERT_SERIALIZED(&rxr->rx_serialize);
	igb_rxeof(rxr, cycle);
}

static void
igb_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct igb_softc *sc = ifp->if_softc;
	int i, txr_cnt, rxr_cnt;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int cpu;

		info->ifpi_status.status_func = igb_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		txr_cnt = igb_get_txring_inuse(sc, TRUE);
		for (i = 0; i < txr_cnt; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];

			cpu = if_ringmap_cpumap(sc->tx_rmap, i);
			KKASSERT(cpu < netisr_ncpus);
			info->ifpi_tx[cpu].poll_func = igb_npoll_tx;
			info->ifpi_tx[cpu].arg = txr;
			info->ifpi_tx[cpu].serializer = &txr->tx_serialize;
			ifsq_set_cpuid(txr->ifsq, cpu);
		}

		rxr_cnt = igb_get_rxring_inuse(sc, TRUE);
		for (i = 0; i < rxr_cnt; ++i) {
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			cpu = if_ringmap_cpumap(sc->rx_rmap, i);
			KKASSERT(cpu < netisr_ncpus);
			info->ifpi_rx[cpu].poll_func = igb_npoll_rx;
			info->ifpi_rx[cpu].arg = rxr;
			info->ifpi_rx[cpu].serializer = &rxr->rx_serialize;
		}
	} else {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];

			ifsq_set_cpuid(txr->ifsq, txr->tx_intr_cpuid);
		}
	}
	if (ifp->if_flags & IFF_RUNNING)
		igb_init(sc);
}

#endif /* IFPOLL_ENABLE */
static void
igb_intr(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t eicr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	eicr = E1000_READ_REG(&sc->hw, E1000_EICR);

	if (eicr == 0)
		return;

	if (ifp->if_flags & IFF_RUNNING) {
		struct igb_tx_ring *txr = &sc->tx_rings[0];
		int i;

		for (i = 0; i < sc->rx_ring_inuse; ++i) {
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			if (eicr & rxr->rx_intr_mask) {
				lwkt_serialize_enter(&rxr->rx_serialize);
				igb_rxeof(rxr, -1);
				lwkt_serialize_exit(&rxr->rx_serialize);
			}
		}

		if (eicr & txr->tx_intr_mask) {
			lwkt_serialize_enter(&txr->tx_serialize);
			igb_tx_intr(txr, *(txr->tx_hdr));
			lwkt_serialize_exit(&txr->tx_serialize);
		}
	}

	if (eicr & E1000_EICR_OTHER) {
		uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR);

		/* Link status change */
		if (icr & E1000_ICR_LSC) {
			sc->hw.mac.get_link_status = 1;
			igb_update_link_status(sc);
		}
	}

	/*
	 * Reading EICR has the side effect to clear interrupt mask,
	 * so all interrupts need to be enabled here.
	 */
	E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
}
static void
igb_intr_shared(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

	/* Hot eject? */
	if (reg_icr == 0xffffffff)
		return;

	/* Definitely not our interrupt. */
	if (reg_icr == 0x0)
		return;

	if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0)
		return;

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
			int i;

			for (i = 0; i < sc->rx_ring_inuse; ++i) {
				struct igb_rx_ring *rxr = &sc->rx_rings[i];

				lwkt_serialize_enter(&rxr->rx_serialize);
				igb_rxeof(rxr, -1);
				lwkt_serialize_exit(&rxr->rx_serialize);
			}
		}

		if (reg_icr & E1000_ICR_TXDW) {
			struct igb_tx_ring *txr = &sc->tx_rings[0];

			lwkt_serialize_enter(&txr->tx_serialize);
			igb_tx_intr(txr, *(txr->tx_hdr));
			lwkt_serialize_exit(&txr->tx_serialize);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		sc->hw.mac.get_link_status = 1;
		igb_update_link_status(sc);
	}

	if (reg_icr & E1000_ICR_RXO)
		sc->rx_overruns++;
}
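
/*
 * Map a single packet onto the TX ring: load it for DMA (defragmenting
 * the mbuf chain if needed), emit an optional context descriptor for
 * TSO/checksum offloading, then one data descriptor per DMA segment.
 * The TDT doorbell write is deferred to the caller so that several
 * packets can be flushed with one register write.
 */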
static int
igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp,
    int *segs_used, int *idx)
{
	bus_dma_segment_t segs[IGB_MAX_SCATTER];
	bus_dmamap_t map;
	struct igb_tx_buf *tx_buf, *tx_buf_mapped;
	union e1000_adv_tx_desc *txd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0;
	int maxsegs, nsegs, i, j, error;
	uint32_t hdrlen = 0;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = igb_tso_pullup(txr, m_headp);
		if (error)
			return error;
		m_head = *m_headp;
	}

	/* Set basic descriptor constants */
	cmd_type_len |= E1000_ADVTXD_DTYP_DATA;
	cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
	if (m_head->m_flags & M_VLANTAG)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	/*
	 * Map the packet for DMA.
	 */
	tx_buf = &txr->tx_buf[txr->next_avail_desc];
	tx_buf_mapped = tx_buf;
	map = tx_buf->map;

	maxsegs = txr->tx_avail - IGB_TX_RESERVED;
	if (maxsegs > IGB_MAX_SCATTER)
		maxsegs = IGB_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (error == ENOBUFS)
			txr->sc->mbuf_defrag_failed++;
		else
			txr->sc->no_tx_dma_setup++;

		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;

	/*
	 * Set up the TX context descriptor, if any hardware offloading is
	 * needed.  This includes CSUM, VLAN, and TSO.  It will consume one
	 * TX descriptor.
	 *
	 * Unlike these chips' predecessors (em/emx), TX context descriptor
	 * will _not_ interfere with TX data fetch pipelining.
	 */
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		igb_tso_ctx(txr, m_head, &hdrlen);
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
		olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		txr->tx_nsegs++;
		(*segs_used)++;
	} else if (igb_txcsum_ctx(txr, m_head)) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			olinfo_status |= (E1000_TXD_POPTS_IXSM << 8);
		if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP))
			olinfo_status |= (E1000_TXD_POPTS_TXSM << 8);
		txr->tx_nsegs++;
		(*segs_used)++;
	}

	*segs_used += nsegs;
	txr->tx_nsegs += nsegs;
	if (txr->tx_nsegs >= txr->intr_nsegs) {
		/*
		 * Report Status (RS) is turned on every intr_nsegs
		 * descriptors (roughly).
		 */
		txr->tx_nsegs = 0;
		cmd_rs = E1000_ADVTXD_DCMD_RS;
	}

	/* Calculate payload length */
	olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
	    << E1000_ADVTXD_PAYLEN_SHIFT);

	/*
	 * 82575 needs the TX context index added; the queue
	 * index is used as TX context index here.
	 */
	if (txr->sc->hw.mac.type == e1000_82575)
		olinfo_status |= txr->me << 4;

	/* Set up our transmit descriptors */
	i = txr->next_avail_desc;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seg_len;
		bus_addr_t seg_addr;

		tx_buf = &txr->tx_buf[i];
		txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
		seg_addr = segs[j].ds_addr;
		seg_len = segs[j].ds_len;

		txd->read.buffer_addr = htole64(seg_addr);
		txd->read.cmd_type_len = htole32(cmd_type_len | seg_len);
		txd->read.olinfo_status = htole32(olinfo_status);
		if (++i == txr->num_tx_desc)
			i = 0;
		tx_buf->m_head = NULL;
	}

	KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n"));
	txr->next_avail_desc = i;
	txr->tx_avail -= nsegs;

	tx_buf->m_head = m_head;
	tx_buf_mapped->map = tx_buf->map;
	tx_buf->map = map;

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs);

	/*
	 * Defer TDT updating, until enough descriptors are setup
	 */
	*idx = i;
#ifdef IGB_TSS_DEBUG
	++txr->tx_pkts;
#endif

	return 0;
}
static void
igb_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct igb_softc *sc = ifp->if_softc;
	struct igb_tx_ring *txr = ifsq_get_priv(ifsq);
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	KKASSERT(txr->ifsq == ifsq);
	ASSERT_SERIALIZED(&txr->tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	if (!sc->link_active || (txr->tx_flags & IGB_TXFLAG_ENABLED) == 0) {
		ifsq_purge(ifsq);
		return;
	}

	while (!ifsq_is_empty(ifsq)) {
		if (txr->tx_avail <= IGB_MAX_SCATTER + IGB_TX_RESERVED) {
			ifsq_set_oactive(ifsq);
			/* Set watchdog on */
			txr->tx_watchdog.wd_timer = 5;
			break;
		}

		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		if (igb_encap(txr, &m_head, &nsegs, &idx)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			continue;
		}

		/*
		 * TX interrupts are aggressively aggregated, so increasing
		 * opackets at TX interrupt time will make the opackets
		 * statistics vastly inaccurate; we do the opackets increment
		 * now.
		 */
		IFNET_STAT_INC(ifp, opackets, 1);

		if (nsegs >= txr->wreg_nsegs) {
			E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
			nsegs = 0;
			idx = -1;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}
	if (idx >= 0)
		E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
	txr->tx_running = IGB_TX_RUNNING;
}
static void
igb_watchdog(struct ifaltq_subque *ifsq)
{
	struct igb_tx_ring *txr = ifsq_get_priv(ifsq);
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct igb_softc *sc = ifp->if_softc;
	int i;

	KKASSERT(txr->ifsq == ifsq);
	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * If flow control has paused us since last checking
	 * it invalidates the watchdog timing, so don't run it.
	 */
	if (sc->pause_frames) {
		sc->pause_frames = 0;
		txr->tx_watchdog.wd_timer = 5;
		return;
	}

	if_printf(ifp, "Watchdog timeout -- resetting\n");
	if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
	    E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)),
	    E1000_READ_REG(&sc->hw, E1000_TDT(txr->me)));
	if_printf(ifp, "TX(%d) desc avail = %d, "
	    "Next TX to Clean = %d\n",
	    txr->me, txr->tx_avail, txr->next_to_clean);

	IFNET_STAT_INC(ifp, oerrors, 1);
	sc->watchdog_events++;

	igb_init(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_rings[i].ifsq);
}
static void
igb_set_eitr(struct igb_softc *sc, int idx, int rate)
{
	uint32_t eitr = 0;

	if (rate > 0) {
		if (sc->hw.mac.type == e1000_82575) {
			eitr = 1000000000 / 256 / rate;
		} else {
			/*
			 * NOTE:
			 * Document is wrong on the 2 bits left shift
			 */
			eitr = 1000000 / rate;
			eitr <<= IGB_EITR_INTVL_SHIFT;
		}

		if (eitr == 0) {
			/* Don't disable it */
			eitr = 1 << IGB_EITR_INTVL_SHIFT;
		} else if (eitr > IGB_EITR_INTVL_MASK) {
			/* Don't allow it to be too large */
			eitr = IGB_EITR_INTVL_MASK;
		}
	}
	if (sc->hw.mac.type == e1000_82575)
		eitr |= eitr << 16;
	else
		eitr |= E1000_EITR_CNT_IGNR;
	E1000_WRITE_REG(&sc->hw, E1000_EITR(idx), eitr);
}
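
/*
 * Example of the conversion above: a rate of 6000 interrupts/s gives
 * eitr = 1000000 / 6000 = 166 on non-82575 parts (the interval is in
 * microseconds before the INTVL shift), while the 82575 path computes
 * the interval in 256ns units as 1000000000 / 256 / 6000 = 651.
 */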
static void
igb_add_intr_rate_sysctl(struct igb_softc *sc, int use,
    const char *name, const char *desc)
{
	int i;

	for (i = 0; i < sc->intr_cnt; ++i) {
		if (sc->intr_data[i].intr_use == use) {
			SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
			    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
			    OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW,
			    sc, use, igb_sysctl_intr_rate, "I", desc);
			break;
		}
	}
}
static int
igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	int use = arg2;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, rate = 0, i;
	struct igb_intr_data *intr;

	for (i = 0; i < sc->intr_cnt; ++i) {
		intr = &sc->intr_data[i];
		if (intr->intr_use == use) {
			rate = intr->intr_rate;
			break;
		}
	}

	error = sysctl_handle_int(oidp, &rate, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (rate <= 0)
		return EINVAL;

	ifnet_serialize_all(ifp);

	for (i = 0; i < sc->intr_cnt; ++i) {
		intr = &sc->intr_data[i];
		if (intr->intr_use == use && intr->intr_rate != rate) {
			intr->intr_rate = rate;
			if (ifp->if_flags & IFF_RUNNING)
				igb_set_eitr(sc, i, rate);
		}
	}

	ifnet_deserialize_all(ifp);

	return error;
}
static int
igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct igb_tx_ring *txr = &sc->tx_rings[0];
	int error, nsegs, i;

	nsegs = txr->intr_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (nsegs <= 0)
		return EINVAL;

	ifnet_serialize_all(ifp);

	if (nsegs >= txr->num_tx_desc - IGB_MAX_SCATTER - IGB_TX_RESERVED) {
		error = EINVAL;
	} else {
		error = 0;
		for (i = 0; i < sc->tx_ring_cnt; ++i)
			sc->tx_rings[i].intr_nsegs = nsegs;
	}

	ifnet_deserialize_all(ifp);

	return error;
}
static int
igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, nsegs, i;

	nsegs = sc->rx_rings[0].wreg_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		sc->rx_rings[i].wreg_nsegs = nsegs;
	ifnet_deserialize_all(ifp);

	return 0;
}
static int
igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, nsegs, i;

	nsegs = sc->tx_rings[0].wreg_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		sc->tx_rings[i].wreg_nsegs = nsegs;
	ifnet_deserialize_all(ifp);

	return 0;
}
static void
igb_init_intr(struct igb_softc *sc)
{
	int i;

	igb_set_intr_mask(sc);

	if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0)
		igb_init_unshared_intr(sc);

	for (i = 0; i < sc->intr_cnt; ++i)
		igb_set_eitr(sc, i, sc->intr_data[i].intr_rate);
}
static void
igb_init_unshared_intr(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	const struct igb_rx_ring *rxr;
	const struct igb_tx_ring *txr;
	uint32_t ivar, index;
	int i;

	/*
	 * Enable extended mode
	 */
	if (sc->hw.mac.type != e1000_82575) {
		uint32_t gpie;
		int ivar_max;

		gpie = E1000_GPIE_NSICR;
		if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
			gpie |= E1000_GPIE_MSIX_MODE |
			    E1000_GPIE_EIAME | E1000_GPIE_PBA;
		}
		E1000_WRITE_REG(hw, E1000_GPIE, gpie);

		/*
		 * Clear IVARs
		 */
		switch (sc->hw.mac.type) {
		case e1000_82576:
			ivar_max = IGB_MAX_IVAR_82576;
			break;
		case e1000_82580:
			ivar_max = IGB_MAX_IVAR_82580;
			break;
		case e1000_i350:
			ivar_max = IGB_MAX_IVAR_I350;
			break;
		case e1000_i354:
			ivar_max = IGB_MAX_IVAR_I354;
			break;
		case e1000_vfadapt:
		case e1000_vfadapt_i350:
			ivar_max = IGB_MAX_IVAR_VF;
			break;
		case e1000_i210:
			ivar_max = IGB_MAX_IVAR_I210;
			break;
		case e1000_i211:
			ivar_max = IGB_MAX_IVAR_I211;
			break;
		default:
			panic("unknown mac type %d\n", sc->hw.mac.type);
		}
		for (i = 0; i < ivar_max; ++i)
			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, 0);
		E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0);
	} else {
		uint32_t tmp;

		KASSERT(sc->intr_type != PCI_INTR_TYPE_MSIX,
		    ("82575 w/ MSI-X"));
		tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_IRCA;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
	}

	/*
	 * Map TX/RX interrupts to EICR
	 */
	switch (sc->hw.mac.type) {
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_vfadapt:
	case e1000_vfadapt_i350:
	case e1000_i210:
	case e1000_i211:
		/* RX entries */
		for (i = 0; i < sc->rx_ring_inuse; ++i) {
			rxr = &sc->rx_rings[i];

			index = i >> 1;
			ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);

			if (i & 1) {
				ivar &= 0xff00ffff;
				ivar |=
				    (rxr->rx_intr_vec | E1000_IVAR_VALID) << 16;
			} else {
				ivar &= 0xffffff00;
				ivar |=
				    (rxr->rx_intr_vec | E1000_IVAR_VALID);
			}
			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
		}
		/* TX entries */
		for (i = 0; i < sc->tx_ring_inuse; ++i) {
			txr = &sc->tx_rings[i];

			index = i >> 1;
			ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);

			if (i & 1) {
				ivar &= 0x00ffffff;
				ivar |=
				    (txr->tx_intr_vec | E1000_IVAR_VALID) << 24;
			} else {
				ivar &= 0xffff00ff;
				ivar |=
				    (txr->tx_intr_vec | E1000_IVAR_VALID) << 8;
			}
			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
		}
		if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
			ivar = (sc->sts_msix_vec | E1000_IVAR_VALID) << 8;
			E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
		}
		break;

	case e1000_82576:
		/* RX entries */
		for (i = 0; i < sc->rx_ring_inuse; ++i) {
			rxr = &sc->rx_rings[i];

			index = i & 0x7; /* Each IVAR has two entries */
			ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);

			if (i < 8) {
				ivar &= 0xffffff00;
				ivar |=
				    (rxr->rx_intr_vec | E1000_IVAR_VALID);
			} else {
				ivar &= 0xff00ffff;
				ivar |=
				    (rxr->rx_intr_vec | E1000_IVAR_VALID) << 16;
			}
			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
		}
		/* TX entries */
		for (i = 0; i < sc->tx_ring_inuse; ++i) {
			txr = &sc->tx_rings[i];

			index = i & 0x7; /* Each IVAR has two entries */
			ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);

			if (i < 8) {
				ivar &= 0xffff00ff;
				ivar |=
				    (txr->tx_intr_vec | E1000_IVAR_VALID) << 8;
			} else {
				ivar &= 0x00ffffff;
				ivar |=
				    (txr->tx_intr_vec | E1000_IVAR_VALID) << 24;
			}
			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
		}
		if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
			ivar = (sc->sts_msix_vec | E1000_IVAR_VALID) << 8;
			E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
		}
		break;

	case e1000_82575:
		/*
		 * Enable necessary interrupt bits.
		 *
		 * The name of the register is confusing; in addition to
		 * configuring the first vector of MSI-X, it also configures
		 * which bits of EICR could be set by the hardware even when
		 * MSI or line interrupt is used; it thus controls interrupt
		 * generation.  It MUST be configured explicitly; the default
		 * value mentioned in the datasheet is wrong: RX queue0 and
		 * TX queue0 are NOT enabled by default.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_MSIXBM(0), sc->intr_mask);
		break;

	default:
		panic("unknown mac type %d\n", sc->hw.mac.type);
	}
}
static int
igb_setup_intr(struct igb_softc *sc)
{
	int i;

	for (i = 0; i < sc->intr_cnt; ++i) {
		struct igb_intr_data *intr = &sc->intr_data[i];
		int error;

		error = bus_setup_intr_descr(sc->dev, intr->intr_res,
		    INTR_MPSAFE, intr->intr_func, intr->intr_funcarg,
		    &intr->intr_hand, intr->intr_serialize, intr->intr_desc);
		if (error) {
			device_printf(sc->dev, "can't setup %dth intr\n", i);
			igb_teardown_intr(sc, i);
			return error;
		}
	}
	return 0;
}
static void
igb_set_txintr_mask(struct igb_tx_ring *txr, int *intr_vec0, int intr_vecmax)
{
	if (txr->sc->hw.mac.type == e1000_82575) {
		txr->tx_intr_vec = 0; /* unused */
		switch (txr->me) {
		case 0:
			txr->tx_intr_mask = E1000_EICR_TX_QUEUE0;
			break;
		case 1:
			txr->tx_intr_mask = E1000_EICR_TX_QUEUE1;
			break;
		case 2:
			txr->tx_intr_mask = E1000_EICR_TX_QUEUE2;
			break;
		case 3:
			txr->tx_intr_mask = E1000_EICR_TX_QUEUE3;
			break;
		default:
			panic("unsupported # of TX ring, %d\n", txr->me);
		}
	} else {
		int intr_vec = *intr_vec0;

		txr->tx_intr_vec = intr_vec % intr_vecmax;
		txr->tx_intr_mask = 1 << txr->tx_intr_vec;

		*intr_vec0 = intr_vec + 1;
	}
}
static void
igb_set_rxintr_mask(struct igb_rx_ring *rxr, int *intr_vec0, int intr_vecmax)
{
	if (rxr->sc->hw.mac.type == e1000_82575) {
		rxr->rx_intr_vec = 0; /* unused */
		switch (rxr->me) {
		case 0:
			rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0;
			break;
		case 1:
			rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1;
			break;
		case 2:
			rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2;
			break;
		case 3:
			rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3;
			break;
		default:
			panic("unsupported # of RX ring, %d\n", rxr->me);
		}
	} else {
		int intr_vec = *intr_vec0;

		rxr->rx_intr_vec = intr_vec % intr_vecmax;
		rxr->rx_intr_mask = 1 << rxr->rx_intr_vec;

		*intr_vec0 = intr_vec + 1;
	}
}
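
/*
 * ifnet serializer glue: the main serializer and the per-ring TX/RX
 * serializers are collected in sc->serializes[], and these callbacks
 * enter/exit/try the whole array on behalf of the network stack.
 */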
static void
igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct igb_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz);
}

static void
igb_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct igb_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz);
}

static int
igb_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct igb_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
	    slz);
}

#ifdef INVARIANTS

static void
igb_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct igb_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
	    slz, serialized);
}

#endif /* INVARIANTS */
static void
igb_set_intr_mask(struct igb_softc *sc)
{
	int i;

	sc->intr_mask = sc->sts_intr_mask;
	for (i = 0; i < sc->rx_ring_inuse; ++i)
		sc->intr_mask |= sc->rx_rings[i].rx_intr_mask;
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		sc->intr_mask |= sc->tx_rings[i].tx_intr_mask;
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if, "intr mask 0x%08x\n",
		    sc->intr_mask);
	}
}
static int
igb_alloc_intr(struct igb_softc *sc)
{
	struct igb_tx_ring *txr;
	struct igb_intr_data *intr;
	int i, intr_vec, intr_vecmax;
	u_int intr_flags;

	igb_alloc_msix(sc);
	if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
		igb_set_ring_inuse(sc, FALSE);
		goto done;
	}

	/*
	 * Reset some settings changed by igb_alloc_msix().
	 */
	if (sc->rx_rmap_intr != NULL) {
		if_ringmap_free(sc->rx_rmap_intr);
		sc->rx_rmap_intr = NULL;
	}
	if (sc->tx_rmap_intr != NULL) {
		if_ringmap_free(sc->tx_rmap_intr);
		sc->tx_rmap_intr = NULL;
	}
	if (sc->intr_data != NULL) {
		kfree(sc->intr_data, M_DEVBUF);
		sc->intr_data = NULL;
	}
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		txr = &sc->tx_rings[i];
		txr->tx_intr_vec = 0;
		txr->tx_intr_mask = 0;
		txr->tx_intr_cpuid = -1;
	}
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		rxr->rx_intr_vec = 0;
		rxr->rx_intr_mask = 0;
	}

	sc->intr_cnt = 1;
	sc->intr_data = kmalloc(sizeof(struct igb_intr_data), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	intr = &sc->intr_data[0];

	/*
	 * Allocate MSI/legacy interrupt resource
	 */
	sc->intr_type = pci_alloc_1intr(sc->dev, igb_msi_enable,
	    &intr->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(sc->dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= IGB_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(sc->dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(sc->dev, "IRQ unshared\n");
		}
	}

	intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	    &intr->intr_rid, intr_flags);
	if (intr->intr_res == NULL) {
		device_printf(sc->dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return ENXIO;
	}

	intr->intr_serialize = &sc->main_serialize;
	intr->intr_cpuid = rman_get_cpuid(intr->intr_res);
	intr->intr_func = (sc->flags & IGB_FLAG_SHARED_INTR) ?
	    igb_intr_shared : igb_intr;
	intr->intr_funcarg = sc;
	intr->intr_rate = IGB_INTR_RATE;
	intr->intr_use = IGB_INTR_USE_RXTX;

	sc->tx_rings[0].tx_intr_cpuid = intr->intr_cpuid;

	/*
	 * Setup MSI/legacy interrupt mask
	 */
	switch (sc->hw.mac.type) {
	case e1000_82575:
		intr_vecmax = IGB_MAX_TXRXINT_82575;
		break;
	case e1000_82576:
		intr_vecmax = IGB_MAX_TXRXINT_82576;
		break;
	case e1000_82580:
		intr_vecmax = IGB_MAX_TXRXINT_82580;
		break;
	case e1000_i350:
		intr_vecmax = IGB_MAX_TXRXINT_I350;
		break;
	case e1000_i354:
		intr_vecmax = IGB_MAX_TXRXINT_I354;
		break;
	case e1000_i210:
		intr_vecmax = IGB_MAX_TXRXINT_I210;
		break;
	case e1000_i211:
		intr_vecmax = IGB_MAX_TXRXINT_I211;
		break;
	default:
		intr_vecmax = IGB_MIN_TXRXINT;
		break;
	}
	intr_vec = 0;
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_set_txintr_mask(&sc->tx_rings[i], &intr_vec, intr_vecmax);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_set_rxintr_mask(&sc->rx_rings[i], &intr_vec, intr_vecmax);
	sc->sts_intr_mask = E1000_EICR_OTHER;

	igb_set_ring_inuse(sc, FALSE);
	KKASSERT(sc->rx_ring_inuse <= IGB_MIN_RING_RSS);
	if (sc->rx_ring_inuse == IGB_MIN_RING_RSS) {
		/*
		 * Allocate RX ring map for RSS setup.
		 */
		sc->rx_rmap_intr = if_ringmap_alloc(sc->dev,
		    IGB_MIN_RING_RSS, IGB_MIN_RING_RSS);
		KASSERT(if_ringmap_count(sc->rx_rmap_intr) ==
		    sc->rx_ring_inuse, ("RX ring inuse mismatch"));
	}
done:
	igb_set_intr_mask(sc);
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		txr = &sc->tx_rings[i];
		if (txr->tx_intr_cpuid < 0)
			txr->tx_intr_cpuid = 0;
	}
	return 0;
}
static void
igb_free_intr(struct igb_softc *sc)
{
	if (sc->intr_data == NULL)
		return;

	if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
		struct igb_intr_data *intr = &sc->intr_data[0];

		KKASSERT(sc->intr_cnt == 1);
		if (intr->intr_res != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    intr->intr_rid, intr->intr_res);
		}
		if (sc->intr_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(sc->dev);

		kfree(sc->intr_data, M_DEVBUF);
	} else {
		igb_free_msix(sc, TRUE);
	}
}
static void
igb_teardown_intr(struct igb_softc *sc, int intr_cnt)
{
	int i;

	if (sc->intr_data == NULL)
		return;

	for (i = 0; i < intr_cnt; ++i) {
		struct igb_intr_data *intr = &sc->intr_data[i];

		bus_teardown_intr(sc->dev, intr->intr_res, intr->intr_hand);
	}
}
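
/*
 * MSI-X allocation strategy: one vector per RX ring, piggybacking a
 * TX ring onto the RX vector when both are mapped to the same CPU,
 * plus one dedicated vector for link status changes.
 */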
4461 igb_alloc_msix(struct igb_softc
*sc
)
4463 int msix_enable
, msix_cnt
, msix_ring
, alloc_cnt
;
4465 int ring_cnt
, ring_cntmax
;
4466 struct igb_intr_data
*intr
;
4467 boolean_t setup
= FALSE
;
4470 * Don't enable MSI-X on 82575, see:
4471 * 82575 specification update errata #25
4473 if (sc
->hw
.mac
.type
== e1000_82575
)
4476 /* Don't enable MSI-X on VF */
4480 msix_enable
= device_getenv_int(sc
->dev
, "msix.enable",
4485 msix_cnt
= pci_msix_count(sc
->dev
);
4486 #ifdef IGB_MSIX_DEBUG
4487 msix_cnt
= device_getenv_int(sc
->dev
, "msix.count", msix_cnt
);
4489 if (msix_cnt
<= 1) {
4490 /* One MSI-X model does not make sense. */
4494 device_printf(sc
->dev
, "MSI-X count %d\n", msix_cnt
);
4495 msix_ring
= msix_cnt
- 1; /* -1 for status */
4498 * Configure # of RX/TX rings usable by MSI-X.
4500 igb_get_rxring_cnt(sc
, &ring_cnt
, &ring_cntmax
);
4501 if (ring_cntmax
> msix_ring
)
4502 ring_cntmax
= msix_ring
;
4503 sc
->rx_rmap_intr
= if_ringmap_alloc(sc
->dev
, ring_cnt
, ring_cntmax
);
4505 igb_get_txring_cnt(sc
, &ring_cnt
, &ring_cntmax
);
4506 if (ring_cntmax
> msix_ring
)
4507 ring_cntmax
= msix_ring
;
4508 sc
->tx_rmap_intr
= if_ringmap_alloc(sc
->dev
, ring_cnt
, ring_cntmax
);
4510 if_ringmap_match(sc
->dev
, sc
->rx_rmap_intr
, sc
->tx_rmap_intr
);
4511 sc
->rx_ring_msix
= if_ringmap_count(sc
->rx_rmap_intr
);
4512 KASSERT(sc
->rx_ring_msix
<= sc
->rx_ring_cnt
,
4513 ("total RX ring count %d, MSI-X RX ring count %d",
4514 sc
->rx_ring_cnt
, sc
->rx_ring_msix
));
4515 sc
->tx_ring_msix
= if_ringmap_count(sc
->tx_rmap_intr
);
4516 KASSERT(sc
->tx_ring_msix
<= sc
->tx_ring_cnt
,
4517 ("total TX ring count %d, MSI-X TX ring count %d",
4518 sc
->tx_ring_cnt
, sc
->tx_ring_msix
));
4521 * Aggregate TX/RX MSI-X
4523 ring_cntmax
= sc
->rx_ring_msix
;
4524 if (ring_cntmax
< sc
->tx_ring_msix
)
4525 ring_cntmax
= sc
->tx_ring_msix
;
4526 KASSERT(ring_cntmax
<= msix_ring
,
4527 ("invalid ring count max %d, MSI-X count for rings %d",
4528 ring_cntmax
, msix_ring
));
4530 alloc_cnt
= ring_cntmax
+ 1; /* +1 for status */
4532 device_printf(sc
->dev
, "MSI-X alloc %d, "
4533 "RX ring %d, TX ring %d\n", alloc_cnt
,
4534 sc
->rx_ring_msix
, sc
->tx_ring_msix
);
4537 sc
->msix_mem_rid
= PCIR_BAR(IGB_MSIX_BAR
);
4538 sc
->msix_mem_res
= bus_alloc_resource_any(sc
->dev
, SYS_RES_MEMORY
,
4539 &sc
->msix_mem_rid
, RF_ACTIVE
);
4540 if (sc
->msix_mem_res
== NULL
) {
4541 sc
->msix_mem_rid
= PCIR_BAR(IGB_MSIX_BAR_ALT
);
4542 sc
->msix_mem_res
= bus_alloc_resource_any(sc
->dev
, SYS_RES_MEMORY
,
4543 &sc
->msix_mem_rid
, RF_ACTIVE
);
4544 if (sc
->msix_mem_res
== NULL
) {
4545 device_printf(sc
->dev
, "Unable to map MSI-X table\n");
4550 sc
->intr_cnt
= alloc_cnt
;
4551 sc
->intr_data
= kmalloc(sizeof(struct igb_intr_data
) * sc
->intr_cnt
,
4552 M_DEVBUF
, M_WAITOK
| M_ZERO
);
4553 for (x
= 0; x
< sc
->intr_cnt
; ++x
) {
4554 intr
= &sc
->intr_data
[x
];
4555 intr
->intr_rid
= -1;
4556 intr
->intr_rate
= IGB_INTR_RATE
;
4560 for (i
= 0; i
< sc
->rx_ring_msix
; ++i
) {
4561 struct igb_rx_ring
*rxr
= &sc
->rx_rings
[i
];
4562 struct igb_tx_ring
*txr
= NULL
;
4565 KKASSERT(x
< sc
->intr_cnt
);
4566 rxr
->rx_intr_vec
= x
;
4567 rxr
->rx_intr_mask
= 1 << rxr
->rx_intr_vec
;
4569 cpuid
= if_ringmap_cpumap(sc
->rx_rmap_intr
, i
);
4572 * Try finding TX ring to piggyback.
4574 for (j
= 0; j
< sc
->tx_ring_msix
; ++j
) {
4576 if_ringmap_cpumap(sc
->tx_rmap_intr
, j
)) {
4577 txr
= &sc
->tx_rings
[j
];
4578 KKASSERT(txr
->tx_intr_cpuid
< 0);
4584 intr
= &sc
->intr_data
[x
++];
4585 intr
->intr_serialize
= &rxr
->rx_serialize
;
4586 intr
->intr_cpuid
= cpuid
;
4587 KKASSERT(intr
->intr_cpuid
< netisr_ncpus
);
4588 intr
->intr_funcarg
= rxr
;
4590 intr
->intr_func
= igb_msix_rxtx
;
4591 intr
->intr_use
= IGB_INTR_USE_RXTX
;
4592 ksnprintf(intr
->intr_desc0
, sizeof(intr
->intr_desc0
),
4593 "%s rx%dtx%d", device_get_nameunit(sc
->dev
),
4596 txr
->tx_intr_vec
= rxr
->rx_intr_vec
;
4597 txr
->tx_intr_mask
= rxr
->rx_intr_mask
;
4598 txr
->tx_intr_cpuid
= intr
->intr_cpuid
;
4600 intr
->intr_func
= igb_msix_rx
;
4601 intr
->intr_rate
= IGB_MSIX_RX_RATE
;
4602 intr
->intr_use
= IGB_INTR_USE_RX
;
4604 ksnprintf(intr
->intr_desc0
, sizeof(intr
->intr_desc0
),
4605 "%s rx%d", device_get_nameunit(sc
->dev
), i
);
4607 intr
->intr_desc
= intr
->intr_desc0
;
4610 for (i
= 0; i
< sc
->tx_ring_msix
; ++i
) {
4611 struct igb_tx_ring
*txr
= &sc
->tx_rings
[i
];
4613 if (txr
->tx_intr_cpuid
>= 0) {
4614 /* Piggybacked by RX ring. */
4618 KKASSERT(x
< sc
->intr_cnt
);
4619 txr
->tx_intr_vec
= x
;
4620 txr
->tx_intr_mask
= 1 << txr
->tx_intr_vec
;
4622 intr
= &sc
->intr_data
[x
++];
4623 intr
->intr_serialize
= &txr
->tx_serialize
;
4624 intr
->intr_func
= igb_msix_tx
;
4625 intr
->intr_funcarg
= txr
;
4626 intr
->intr_rate
= IGB_MSIX_TX_RATE
;
4627 intr
->intr_use
= IGB_INTR_USE_TX
;
4629 intr
->intr_cpuid
= if_ringmap_cpumap(sc
->tx_rmap_intr
, i
);
4630 KKASSERT(intr
->intr_cpuid
< netisr_ncpus
);
4631 txr
->tx_intr_cpuid
= intr
->intr_cpuid
;
4633 ksnprintf(intr
->intr_desc0
, sizeof(intr
->intr_desc0
), "%s tx%d",
4634 device_get_nameunit(sc
->dev
), i
);
4635 intr
->intr_desc
= intr
->intr_desc0
;
4641 KKASSERT(x
< sc
->intr_cnt
);
4642 sc
->sts_msix_vec
= x
;
4643 sc
->sts_intr_mask
= 1 << sc
->sts_msix_vec
;
4645 intr
= &sc
->intr_data
[x
++];
4646 intr
->intr_serialize
= &sc
->main_serialize
;
4647 intr
->intr_func
= igb_msix_status
;
4648 intr
->intr_funcarg
= sc
;
4649 intr
->intr_cpuid
= 0;
4650 intr
->intr_use
= IGB_INTR_USE_STATUS
;
4652 ksnprintf(intr
->intr_desc0
, sizeof(intr
->intr_desc0
), "%s sts",
4653 device_get_nameunit(sc
->dev
));
4654 intr
->intr_desc
= intr
->intr_desc0
;
4656 KKASSERT(x
== sc
->intr_cnt
);
4658 error
= pci_setup_msix(sc
->dev
);
4660 device_printf(sc
->dev
, "Setup MSI-X failed\n");
4665 for (i
= 0; i
< sc
->intr_cnt
; ++i
) {
4666 intr
= &sc
->intr_data
[i
];
4668 error
= pci_alloc_msix_vector(sc
->dev
, i
, &intr
->intr_rid
,
4671 device_printf(sc
->dev
,
4672 "Unable to allocate MSI-X %d on cpu%d\n", i
,
4677 intr
->intr_res
= bus_alloc_resource_any(sc
->dev
, SYS_RES_IRQ
,
4678 &intr
->intr_rid
, RF_ACTIVE
);
4679 if (intr
->intr_res
== NULL
) {
4680 device_printf(sc
->dev
,
4681 "Unable to allocate MSI-X %d resource\n", i
);
4687 pci_enable_msix(sc
->dev
);
4688 sc
->intr_type
= PCI_INTR_TYPE_MSIX
;
4691 igb_free_msix(sc
, setup
);
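
/*
 * Illustrative sketch (annotation, not original driver code): with
 * three RX and three TX rings whose ringmaps land on the same CPUs,
 * every TX ring piggybacks its RX sibling and the vector layout built
 * above becomes
 *
 *   vector 0..2  rx0tx0 .. rx2tx2  igb_msix_rxtx
 *   vector 3     sts               igb_msix_status (cpu0)
 *
 * i.e. alloc_cnt = max(rx_ring_msix, tx_ring_msix) + 1 = 4.
 */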

static void
igb_free_msix(struct igb_softc *sc, boolean_t setup)
{
	int i;

	KKASSERT(sc->intr_cnt > 1);

	for (i = 0; i < sc->intr_cnt; ++i) {
		struct igb_intr_data *intr = &sc->intr_data[i];

		if (intr->intr_res != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    intr->intr_rid, intr->intr_res);
		}
		if (intr->intr_rid >= 0)
			pci_release_msix_vector(sc->dev, intr->intr_rid);
	}
	if (setup)
		pci_teardown_msix(sc->dev);

	sc->intr_cnt = 0;
	kfree(sc->intr_data, M_DEVBUF);
	sc->intr_data = NULL;
}

static void
igb_msix_rx(void *arg)
{
	struct igb_rx_ring *rxr = arg;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	igb_rxeof(rxr, -1);
	E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask);
}

static void
igb_msix_tx(void *arg)
{
	struct igb_tx_ring *txr = arg;

	ASSERT_SERIALIZED(&txr->tx_serialize);

	igb_tx_intr(txr, *(txr->tx_hdr));
	E1000_WRITE_REG(&txr->sc->hw, E1000_EIMS, txr->tx_intr_mask);
}

static void
igb_msix_status(void *arg)
{
	struct igb_softc *sc = arg;
	uint32_t icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (icr & E1000_ICR_LSC) {
		sc->hw.mac.get_link_status = 1;
		igb_update_link_status(sc);
	}

	E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->sts_intr_mask);
}

static void
igb_set_ring_inuse(struct igb_softc *sc, boolean_t polling)
{
	sc->rx_ring_inuse = igb_get_rxring_inuse(sc, polling);
	sc->tx_ring_inuse = igb_get_txring_inuse(sc, polling);
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if, "RX rings %d/%d, TX rings %d/%d\n",
		    sc->rx_ring_inuse, sc->rx_ring_cnt,
		    sc->tx_ring_inuse, sc->tx_ring_cnt);
	}
}

static int
igb_get_rxring_inuse(const struct igb_softc *sc, boolean_t polling)
{
	if (!IGB_ENABLE_HWRSS(sc))
		return 1;

	if (polling)
		return sc->rx_ring_cnt;
	else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		return IGB_MIN_RING_RSS;
	else
		return sc->rx_ring_msix;
}

static int
igb_get_txring_inuse(const struct igb_softc *sc, boolean_t polling)
{
	if (!IGB_ENABLE_HWTSS(sc))
		return 1;

	if (polling)
		return sc->tx_ring_cnt;
	else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		return IGB_MIN_RING;
	else
		return sc->tx_ring_msix;
}
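
/*
 * Annotation (not original driver code), summarizing the two helpers
 * above:
 *
 *   mode                      RX rings in use    TX rings in use
 *   RSS/TSS disabled          1                  1
 *   polling(4)                rx_ring_cnt        tx_ring_cnt
 *   legacy INTx/MSI           IGB_MIN_RING_RSS   IGB_MIN_RING
 *   MSI-X                     rx_ring_msix       tx_ring_msix
 */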

static int
igb_tso_pullup(struct igb_tx_ring *txr, struct mbuf **mp)
{
	int hoff, iphlen, thoff;
	struct mbuf *m;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}

	if (txr->tx_flags & IGB_TXFLAG_TSO_IPLEN0) {
		struct ip *ip;

		ip = mtodoff(m, struct ip *, hoff);
		ip->ip_len = 0;
	}

	return 0;
}
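
/*
 * Worked example (annotation, not original driver code): for a plain
 * Ethernet + IPv4 + TCP frame without options, hoff = 14, iphlen = 20
 * and thoff = 20, so igb_tso_pullup() requires the first 54 bytes --
 * every header the TSO context descriptor will describe -- to reside
 * in the leading mbuf.
 */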

static void
igb_tso_ctx(struct igb_tx_ring *txr, struct mbuf *m, uint32_t *hlen)
{
	struct e1000_adv_tx_context_desc *TXD;
	uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
	int hoff, ctxd, iphlen, thoff;

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;

	ctxd = txr->next_avail_desc;
	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];

	if (m->m_flags & M_VLANTAG) {
		uint16_t vlantag;

		vlantag = htole16(m->m_pkthdr.ether_vlantag);
		vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= (hoff << E1000_ADVTXD_MACLEN_SHIFT);
	vlan_macip_lens |= iphlen;

	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;

	mss_l4len_idx |= (m->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (thoff << E1000_ADVTXD_L4LEN_SHIFT);

	/*
	 * 82575 needs the TX context index added; the queue
	 * index is used as TX context index here.
	 */
	if (txr->sc->hw.mac.type == e1000_82575)
		mss_l4len_idx |= txr->me << 4;

	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;

	*hlen = hoff + iphlen + thoff;
}
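
/*
 * Illustrative packing (annotation, not original driver code): for the
 * 54-byte header case above with an MSS of 1448, igb_tso_ctx() ends up
 * with roughly
 *
 *   vlan_macip_lens = (14 << E1000_ADVTXD_MACLEN_SHIFT) | 20
 *   mss_l4len_idx   = (1448 << E1000_ADVTXD_MSS_SHIFT) |
 *                     (20 << E1000_ADVTXD_L4LEN_SHIFT)
 *   *hlen           = 54
 *
 * plus the context index bits on 82575.
 */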

static void
igb_setup_serialize(struct igb_softc *sc)
{
	int i = 0, j;

	/* Main + RX + TX */
	sc->serialize_cnt = 1 + sc->rx_ring_cnt + sc->tx_ring_cnt;
	sc->serializes =
	    kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *),
		M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Setup serializes
	 *
	 * NOTE: Order is critical
	 */

	KKASSERT(i < sc->serialize_cnt);
	sc->serializes[i++] = &sc->main_serialize;

	for (j = 0; j < sc->rx_ring_cnt; ++j) {
		KKASSERT(i < sc->serialize_cnt);
		sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
	}

	for (j = 0; j < sc->tx_ring_cnt; ++j) {
		KKASSERT(i < sc->serialize_cnt);
		sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
	}

	KKASSERT(i == sc->serialize_cnt);
}
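
/*
 * Annotation (not original driver code): the array built above is
 * ordered main serializer first, then every RX ring serializer, then
 * every TX ring serializer.  The NOTE calls the order critical because
 * all paths that hold multiple serializers must acquire them in this
 * same array order to stay deadlock free.
 */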

static void
igb_msix_rxtx(void *arg)
{
	struct igb_rx_ring *rxr = arg;
	struct igb_tx_ring *txr;
	int hdr;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	igb_rxeof(rxr, -1);

	/*
	 * NOTE:
	 * Since next_to_clean is only changed by igb_txeof(),
	 * which is called only in interrupt handler, the
	 * check w/o holding tx serializer is MPSAFE.
	 */
	txr = rxr->rx_txr;
	hdr = *(txr->tx_hdr);
	if (hdr != txr->next_to_clean) {
		lwkt_serialize_enter(&txr->tx_serialize);
		igb_tx_intr(txr, hdr);
		lwkt_serialize_exit(&txr->tx_serialize);
	}

	E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask);
}

static void
igb_set_timer_cpuid(struct igb_softc *sc, boolean_t polling)
{
	if (polling || sc->intr_type == PCI_INTR_TYPE_MSIX)
		sc->timer_cpuid = 0; /* XXX fixed */
	else
		sc->timer_cpuid = rman_get_cpuid(sc->intr_data[0].intr_res);
}

static void
igb_init_dmac(struct igb_softc *sc, uint32_t pba)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (hw->mac.type == e1000_i211)
		return;

	if (hw->mac.type > e1000_82580) {
		uint32_t dmac, hwm;

		if (sc->dma_coalesce == 0) { /* Disabling it */
			reg = ~E1000_DMACR_DMAC_EN;
			E1000_WRITE_REG(hw, E1000_DMACR, reg);
			return;
		} else {
			if_printf(&sc->arpcom.ac_if,
			    "DMA Coalescing enabled\n");
		}

		/* Set starting threshold */
		E1000_WRITE_REG(hw, E1000_DMCTXTH, 0);

		hwm = 64 * pba - sc->max_frame_size / 16;
		if (hwm < 64 * (pba - 6))
			hwm = 64 * (pba - 6);
		reg = E1000_READ_REG(hw, E1000_FCRTC);
		reg &= ~E1000_FCRTC_RTH_COAL_MASK;
		reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
		    & E1000_FCRTC_RTH_COAL_MASK);
		E1000_WRITE_REG(hw, E1000_FCRTC, reg);

		dmac = pba - sc->max_frame_size / 512;
		if (dmac < pba - 10)
			dmac = pba - 10;
		reg = E1000_READ_REG(hw, E1000_DMACR);
		reg &= ~E1000_DMACR_DMACTHR_MASK;
		reg |= ((dmac << E1000_DMACR_DMACTHR_SHIFT)
		    & E1000_DMACR_DMACTHR_MASK);

		/* transition to L0x or L1 if available..*/
		reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

		/*
		 * Check if status is 2.5Gb backplane connection
		 * before configuration of watchdog timer, which
		 * is in msec values in 12.8usec intervals; watchdog
		 * timer = msec values in 32usec intervals for non
		 * 2.5Gb connection.
		 */
		if (hw->mac.type == e1000_i354) {
			int status = E1000_READ_REG(hw, E1000_STATUS);

			if ((status & E1000_STATUS_2P5_SKU) &&
			    !(status & E1000_STATUS_2P5_SKU_OVER))
				reg |= ((sc->dma_coalesce * 5) >> 6);
			else
				reg |= (sc->dma_coalesce >> 5);
		} else {
			reg |= (sc->dma_coalesce >> 5);
		}

		E1000_WRITE_REG(hw, E1000_DMACR, reg);

		E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);

		/* Set the interval before transition */
		reg = E1000_READ_REG(hw, E1000_DMCTLX);
		if (hw->mac.type == e1000_i350)
			reg |= IGB_DMCTLX_DCFLUSH_DIS;

		/*
		 * In 2.5Gb connection, TTLX unit is 0.4 usec, which
		 * is 0x4*2 = 0xA. But delay is still 4 usec.
		 */
		if (hw->mac.type == e1000_i354) {
			int status = E1000_READ_REG(hw, E1000_STATUS);

			if ((status & E1000_STATUS_2P5_SKU) &&
			    !(status & E1000_STATUS_2P5_SKU_OVER))
				reg |= 0xA;
			else
				reg |= 0x4;
		} else {
			reg |= 0x4;
		}

		E1000_WRITE_REG(hw, E1000_DMCTLX, reg);

		/* Free space in tx packet buffer to wake from DMA coal */
		E1000_WRITE_REG(hw, E1000_DMCTXTH,
		    (IGB_TXPBSIZE - (2 * sc->max_frame_size)) >> 6);

		/* Make low power state decision controlled by DMA coal */
		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		reg &= ~E1000_PCIEMISC_LX_DECISION;
		E1000_WRITE_REG(hw, E1000_PCIEMISC, reg);
	} else if (hw->mac.type == e1000_82580) {
		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		E1000_WRITE_REG(hw, E1000_PCIEMISC,
		    reg & ~E1000_PCIEMISC_LX_DECISION);
		E1000_WRITE_REG(hw, E1000_DMACR, 0);
	}
}
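
/*
 * Worked example (annotation, not original driver code): with pba = 24
 * and max_frame_size = 1522, the thresholds above come out as
 *
 *   hwm  = 64 * 24 - 1522 / 16 = 1441   (floor 64 * (24 - 6) = 1152)
 *   dmac = 24 - 1522 / 512     = 22     (floor 24 - 10 = 14)
 *
 * so 1441 lands in the FCRTC coalescing threshold and 22 in the DMACR
 * threshold field.
 */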

static void
igb_reg_dump(struct igb_softc *sc)
{
	device_t dev = sc->dev;
	int col = 0;

#define DUMPREG(regno)	\
	kprintf(" %13s=%08x", #regno + 6, E1000_READ_REG(&sc->hw, regno));\
	if (++col == 3) {	\
		kprintf("\n");	\
		col = 0;	\
	}

	device_printf(dev, "REGISTER DUMP\n");
	DUMPREG(E1000_CTRL);
	DUMPREG(E1000_STATUS);
	DUMPREG(E1000_EECD);
	DUMPREG(E1000_EERD);
	DUMPREG(E1000_CTRL_EXT);
	DUMPREG(E1000_MDIC);
	DUMPREG(E1000_SCTL);
	DUMPREG(E1000_FCAL);
	DUMPREG(E1000_FCAH);
	DUMPREG(E1000_CONNSW);
	DUMPREG(E1000_IVAR);
	DUMPREG(E1000_SVCR);
	DUMPREG(E1000_LPIC);
	DUMPREG(E1000_RCTL);
	DUMPREG(E1000_FCTTV);
	DUMPREG(E1000_TXCW);
	DUMPREG(E1000_RXCW);
	DUMPREG(E1000_EIMS);
	DUMPREG(E1000_EIAC);
	DUMPREG(E1000_EIAM);
	DUMPREG(E1000_GPIE);
	DUMPREG(E1000_IVAR0);
	DUMPREG(E1000_IVAR_MISC);
	DUMPREG(E1000_TCTL);
	DUMPREG(E1000_TCTL_EXT);
	DUMPREG(E1000_TIPG);
	DUMPREG(E1000_LEDCTL);
	DUMPREG(E1000_EXTCNF_CTRL);
	DUMPREG(E1000_EXTCNF_SIZE);
	DUMPREG(E1000_PHY_CTRL);
	DUMPREG(E1000_PBECCSTS);
	DUMPREG(E1000_EEMNGCTL);
	DUMPREG(E1000_EEARBC);
	DUMPREG(E1000_FLASHT);
	DUMPREG(E1000_EEARBC_I210);
	DUMPREG(E1000_EEWR);
	DUMPREG(E1000_FLSWCTL);
	DUMPREG(E1000_FLSWDATA);
	DUMPREG(E1000_FLSWCNT);
	DUMPREG(E1000_FLOP);
	DUMPREG(E1000_I2CCMD);
	DUMPREG(E1000_I2CPARAMS);
	DUMPREG(E1000_WDSTP);
	DUMPREG(E1000_SWDSTS);
	DUMPREG(E1000_FRTIMER);
	DUMPREG(E1000_TCPTIMER);
	DUMPREG(E1000_VPDDIAG);
	DUMPREG(E1000_IMS_V2);
	DUMPREG(E1000_IAM_V2);
	DUMPREG(E1000_FCRTL);
	DUMPREG(E1000_FCRTH);
	DUMPREG(E1000_PSRCTL);
	DUMPREG(E1000_RDFH);
	DUMPREG(E1000_RDFT);
	DUMPREG(E1000_RDFHS);
	DUMPREG(E1000_RDFTS);
	DUMPREG(E1000_RDFPC);
	DUMPREG(E1000_PBRTH);
	DUMPREG(E1000_FCRTV);
	DUMPREG(E1000_RDPUMB);
	DUMPREG(E1000_RDPUAD);
	DUMPREG(E1000_RDPUWD);
	DUMPREG(E1000_RDPURD);
	DUMPREG(E1000_RDPUCTL);
	DUMPREG(E1000_PBDIAG);
	DUMPREG(E1000_RXPBS);
	DUMPREG(E1000_IRPBS);
	DUMPREG(E1000_PBRWAC);
	DUMPREG(E1000_RDTR);
	DUMPREG(E1000_RADV);
	DUMPREG(E1000_SRWR);
	DUMPREG(E1000_I210_FLMNGCTL);
	DUMPREG(E1000_I210_FLMNGDATA);
	DUMPREG(E1000_I210_FLMNGCNT);
	DUMPREG(E1000_I210_FLSWCTL);
	DUMPREG(E1000_I210_FLSWDATA);
	DUMPREG(E1000_I210_FLSWCNT);
	DUMPREG(E1000_I210_FLA);
	DUMPREG(E1000_INVM_SIZE);
	DUMPREG(E1000_I210_TQAVCTRL);
	DUMPREG(E1000_RSRPD);
	DUMPREG(E1000_RAID);
	DUMPREG(E1000_TXDMAC);
	DUMPREG(E1000_KABGTXD);
	DUMPREG(E1000_PBSLAC);
	DUMPREG(E1000_TXPBS);
	DUMPREG(E1000_ITPBS);
	DUMPREG(E1000_TDFH);
	DUMPREG(E1000_TDFT);
	DUMPREG(E1000_TDFHS);
	DUMPREG(E1000_TDFTS);
	DUMPREG(E1000_TDFPC);
	DUMPREG(E1000_TDPUMB);
	DUMPREG(E1000_TDPUAD);
	DUMPREG(E1000_TDPUWD);
	DUMPREG(E1000_TDPURD);
	DUMPREG(E1000_TDPUCTL);
	DUMPREG(E1000_DTXCTL);
	DUMPREG(E1000_DTXTCPFLGL);
	DUMPREG(E1000_DTXTCPFLGH);
	DUMPREG(E1000_DTXMXSZRQ);
	DUMPREG(E1000_TIDV);
	DUMPREG(E1000_TADV);
	DUMPREG(E1000_TSPMT);
	DUMPREG(E1000_VFGPRC);
	DUMPREG(E1000_VFGORC);
	DUMPREG(E1000_VFMPRC);
	DUMPREG(E1000_VFGPTC);
	DUMPREG(E1000_VFGOTC);
	DUMPREG(E1000_VFGOTLBC);
	DUMPREG(E1000_VFGPTLBC);
	DUMPREG(E1000_VFGORLBC);
	DUMPREG(E1000_VFGPRLBC);
	DUMPREG(E1000_LSECTXCAP);
	DUMPREG(E1000_LSECRXCAP);
	DUMPREG(E1000_LSECTXCTRL);
	DUMPREG(E1000_LSECRXCTRL);
	DUMPREG(E1000_LSECTXSCL);
	DUMPREG(E1000_LSECTXSCH);
	DUMPREG(E1000_LSECTXSA);
	DUMPREG(E1000_LSECTXPN0);
	DUMPREG(E1000_LSECTXPN1);
	DUMPREG(E1000_LSECRXSCL);
	DUMPREG(E1000_LSECRXSCH);
	DUMPREG(E1000_IPSCTRL);
	DUMPREG(E1000_IPSRXCMD);
	DUMPREG(E1000_IPSRXIDX);
	DUMPREG(E1000_IPSRXSALT);
	DUMPREG(E1000_IPSRXSPI);
	DUMPREG(E1000_IPSTXSALT);
	DUMPREG(E1000_IPSTXIDX);
	DUMPREG(E1000_PCS_CFG0);
	DUMPREG(E1000_PCS_LCTL);
	DUMPREG(E1000_PCS_LSTAT);
	DUMPREG(E1000_PCS_ANADV);
	DUMPREG(E1000_PCS_LPAB);
	DUMPREG(E1000_PCS_NPTX);
	DUMPREG(E1000_PCS_LPABNP);
	DUMPREG(E1000_RXCSUM);
	DUMPREG(E1000_RLPML);
	DUMPREG(E1000_RFCTL);
	DUMPREG(E1000_VFTA);
	DUMPREG(E1000_VT_CTL);
	DUMPREG(E1000_CIAA);
	DUMPREG(E1000_CIAD);
	DUMPREG(E1000_VFQA0);
	DUMPREG(E1000_VFQA1);
	DUMPREG(E1000_WUFC);
	DUMPREG(E1000_MANC);
	DUMPREG(E1000_IPAV);
	DUMPREG(E1000_IP4AT);
	DUMPREG(E1000_IP6AT);
	DUMPREG(E1000_WUPL);
	DUMPREG(E1000_WUPM);
	DUMPREG(E1000_PBACL);
	DUMPREG(E1000_FFLT);
	DUMPREG(E1000_HOST_IF);
	DUMPREG(E1000_HIBBA);
	DUMPREG(E1000_KMRNCTRLSTA);
	DUMPREG(E1000_MANC2H);
	DUMPREG(E1000_CCMCTL);
	DUMPREG(E1000_GIOCTL);
	DUMPREG(E1000_SCCTL);
#define E1000_WCS	0x558C
#define E1000_GCR_EXT	0x586C
	DUMPREG(E1000_GCR_EXT);
	DUMPREG(E1000_GCR2);
	DUMPREG(E1000_FACTPS);
	DUMPREG(E1000_DCA_ID);
	DUMPREG(E1000_DCA_CTRL);
	DUMPREG(E1000_UFUSE);
	DUMPREG(E1000_FFLT_DBG);
	DUMPREG(E1000_HICR);
	DUMPREG(E1000_FWSTS);
	DUMPREG(E1000_CPUVEC);
	DUMPREG(E1000_MRQC);
	DUMPREG(E1000_SWPBS);
	DUMPREG(E1000_MBVFICR);
	DUMPREG(E1000_MBVFIMR);
	DUMPREG(E1000_VFLRE);
	DUMPREG(E1000_VFRE);
	DUMPREG(E1000_VFTE);
	DUMPREG(E1000_DTXSWC);
	DUMPREG(E1000_WVBR);
	DUMPREG(E1000_RPLOLR);
	DUMPREG(E1000_IOVTCL);
	DUMPREG(E1000_VMRCTL);
	DUMPREG(E1000_VMRVLAN);
	DUMPREG(E1000_VMRVM);
	DUMPREG(E1000_LVMMC);
	DUMPREG(E1000_TXSWC);
	DUMPREG(E1000_SCCRL);
	DUMPREG(E1000_BSCTRH);
	DUMPREG(E1000_MSCTRH);
	DUMPREG(E1000_RXSTMPL);
	DUMPREG(E1000_RXSTMPH);
	DUMPREG(E1000_RXSATRL);
	DUMPREG(E1000_RXSATRH);
	DUMPREG(E1000_TXSTMPL);
	DUMPREG(E1000_TXSTMPH);
	DUMPREG(E1000_TIMINCA);
	DUMPREG(E1000_TIMADJL);
	DUMPREG(E1000_TIMADJH);
	DUMPREG(E1000_TSAUXC);
	DUMPREG(E1000_SYSSTMPL);
	DUMPREG(E1000_SYSSTMPH);
	DUMPREG(E1000_PLTSTMPL);
	DUMPREG(E1000_PLTSTMPH);
	DUMPREG(E1000_RXMTRL);
	DUMPREG(E1000_RXUDP);
	DUMPREG(E1000_SYSTIMR);
	DUMPREG(E1000_TSICR);
	DUMPREG(E1000_TSIM);
	DUMPREG(E1000_DMACR);
	DUMPREG(E1000_DMCTXTH);
	DUMPREG(E1000_DMCTLX);
	DUMPREG(E1000_DMCRTRH);
	DUMPREG(E1000_DMCCNT);
	DUMPREG(E1000_FCRTC);
	DUMPREG(E1000_PCIEMISC);
	DUMPREG(E1000_PCIEERRSTS);
	DUMPREG(E1000_IPCNFG);
	DUMPREG(E1000_LTRC);
	DUMPREG(E1000_EEER);
	DUMPREG(E1000_EEE_SU);
	DUMPREG(E1000_TLPIC);
	DUMPREG(E1000_RLPIC);
	kprintf("\n");
}
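
/*
 * Annotation (not original driver code): DUMPREG() prints each register
 * name with its "E1000_" prefix stripped (#regno + 6), right-aligned in
 * 13 columns and followed by the 32-bit value in hex, three entries per
 * output line, e.g. "          CTRL=xxxxxxxx".
 */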

static int
igb_sysctl_reg_dump(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, dump = 0;

	error = sysctl_handle_int(oidp, &dump, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (dump <= 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	igb_reg_dump(sc);
	ifnet_deserialize_all(ifp);

	return error;
}
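
/*
 * Usage sketch (annotation, not original driver code; the sysctl node
 * name depends on how this handler is registered elsewhere in the
 * driver -- "reg_dump" below is an assumed name):
 *
 *   sysctl dev.igb.0.reg_dump=1
 *
 * Writing a positive value serializes the interface, dumps the
 * registers via igb_reg_dump() and releases the serializers again.
 */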