/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 *
 * Copyright (c) 2001-2006, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/em/if_em.c,v 1.70 2008/05/02 07:40:32 sephe Exp $
 */
/*
 * SERIALIZATION API RULES:
 *
 * - If the driver uses the same serializer for the interrupt as for the
 *   ifnet, most of the serialization will be done automatically for the
 *   driver.
 *
 * - ifmedia entry points will be serialized by the ifmedia code using the
 *   ifnet serializer.
 *
 * - if_* entry points except for if_input will be serialized by the IF
 *   and protocol layers.
 *
 * - The device driver must be sure to serialize access from timeout code
 *   installed by the device driver.
 *
 * - The device driver typically holds the serializer at the time it wishes
 *   to call if_input.  If so, it should pass the serializer to if_input and
 *   note that the serializer might be dropped temporarily by if_input
 *   (e.g. in case it has to bridge the packet to another interface).
 *
 *   NOTE!  Since callers into the device driver hold the ifnet serializer,
 *   the device driver may be holding a serializer at the time it calls
 *   if_input even if it is not serializer-aware.
 */
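/*
 * Illustrative sketch (not part of the original rules): a driver-installed
 * timeout callback is expected to take the ifnet serializer itself before
 * touching driver state or pushing packets up the stack, roughly:
 *
 *	lwkt_serialize_enter(ifp->if_serializer);
 *	... update driver state, possibly call ifp->if_input(ifp, m) ...
 *	lwkt_serialize_exit(ifp->if_serializer);
 *
 * em_local_timer() and em_82547_move_tail() below follow this pattern.
 */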
96 #include "opt_polling.h"
98 #include "opt_serializer.h"
99 #include "opt_ethernet.h"
101 #include <sys/param.h>
103 #include <sys/endian.h>
104 #include <sys/kernel.h>
106 #include <sys/malloc.h>
107 #include <sys/mbuf.h>
108 #include <sys/module.h>
109 #include <sys/rman.h>
110 #include <sys/serialize.h>
111 #include <sys/socket.h>
112 #include <sys/sockio.h>
113 #include <sys/sysctl.h>
116 #include <net/ethernet.h>
118 #include <net/if_arp.h>
119 #include <net/if_dl.h>
120 #include <net/if_media.h>
121 #include <net/if_types.h>
122 #include <net/ifq_var.h>
123 #include <net/vlan/if_vlan_var.h>
124 #include <net/vlan/if_vlan_ether.h>
127 #include <netinet/in.h>
128 #include <netinet/in_systm.h>
129 #include <netinet/in_var.h>
130 #include <netinet/ip.h>
131 #include <netinet/tcp.h>
132 #include <netinet/udp.h>
135 #include <dev/netif/em/if_em_hw.h>
136 #include <dev/netif/em/if_em.h>
#define EM_X60_WORKAROUND

/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int	em_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/
char em_driver_version[] = "6.2.9";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into em_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/
static em_vendor_info_t em_vendor_info_array[] =
{
	/* Intel(R) PRO/1000 Network Connection */
	{ 0x8086, E1000_DEV_ID_82540EM,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EM_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LP,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82541EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82542,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82543GC_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82543GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82544EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82545EM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545EM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82546EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_PCIE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
						PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82547EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547GI,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82571EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82573E,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82573E_IAMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82573L,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE_GT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE_G,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_M,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_ICH9_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE_GT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE_G,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82575EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x101A,			PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, 0x1014,			PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry */
	{ 0, 0, 0, 0, 0}
};
/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/

static const char *em_strings[] = {
	"Intel(R) PRO/1000 Network Connection"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	em_probe(device_t);
static int	em_attach(device_t);
static int	em_detach(device_t);
static int	em_shutdown(device_t);
static void	em_intr(void *);
static int	em_suspend(device_t);
static int	em_resume(device_t);
static void	em_start(struct ifnet *);
static int	em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	em_watchdog(struct ifnet *);
static void	em_init(void *);
static void	em_stop(void *);
static void	em_media_status(struct ifnet *, struct ifmediareq *);
static int	em_media_change(struct ifnet *);
static void	em_identify_hardware(struct adapter *);
static int	em_allocate_pci_resources(device_t);
static void	em_free_pci_resources(device_t);
static void	em_local_timer(void *);
static int	em_hardware_init(struct adapter *);
static void	em_setup_interface(device_t, struct adapter *);
static int	em_setup_transmit_structures(struct adapter *);
static void	em_initialize_transmit_unit(struct adapter *);
static int	em_setup_receive_structures(struct adapter *);
static void	em_initialize_receive_unit(struct adapter *);
static void	em_enable_intr(struct adapter *);
static void	em_disable_intr(struct adapter *);
static void	em_free_transmit_structures(struct adapter *);
static void	em_free_receive_structures(struct adapter *);
static void	em_update_stats_counters(struct adapter *);
static void	em_txeof(struct adapter *);
static int	em_allocate_receive_structures(struct adapter *);
static void	em_rxeof(struct adapter *, int);
static void	em_receive_checksum(struct adapter *, struct em_rx_desc *,
				    struct mbuf *);
static void	em_transmit_checksum_setup(struct adapter *, struct mbuf *,
					   uint32_t *, uint32_t *);
static void	em_set_promisc(struct adapter *);
static void	em_disable_promisc(struct adapter *);
static void	em_set_multi(struct adapter *);
static void	em_print_hw_stats(struct adapter *);
static void	em_update_link_status(struct adapter *);
static int	em_get_buf(int i, struct adapter *, struct mbuf *, int how);
static void	em_enable_vlans(struct adapter *);
static void	em_disable_vlans(struct adapter *);
static int	em_encap(struct adapter *, struct mbuf *);
static void	em_smartspeed(struct adapter *);
static int	em_82547_fifo_workaround(struct adapter *, int);
static void	em_82547_update_fifo_head(struct adapter *, int);
static int	em_82547_tx_fifo_reset(struct adapter *);
static void	em_82547_move_tail(void *);
static void	em_82547_move_tail_serialized(struct adapter *);
static int	em_dma_malloc(struct adapter *, bus_size_t,
			      struct em_dma_alloc *);
static void	em_dma_free(struct adapter *, struct em_dma_alloc *);
static void	em_print_debug_info(struct adapter *);
static int	em_is_valid_ether_addr(uint8_t *);
static int	em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static uint32_t	em_fill_descriptors(bus_addr_t address, uint32_t length,
				    PDESC_ARRAY desc_array);
static int	em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static void	em_add_int_delay_sysctl(struct adapter *, const char *,
					const char *,
					struct em_int_delay_info *, int, int);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, em_probe),
	DEVMETHOD(device_attach, em_attach),
	DEVMETHOD(device_detach, em_detach),
	DEVMETHOD(device_shutdown, em_shutdown),
	DEVMETHOD(device_suspend, em_suspend),
	DEVMETHOD(device_resume, em_resume),
	{ 0, 0 }
};

static driver_t em_driver = {
	"em", em_methods, sizeof(struct adapter),
};

static devclass_t em_devclass;

DECLARE_DUMMY_MODULE(if_em);
DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
/*********************************************************************
 *  Tunable default values.
 *********************************************************************/

#define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
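/*
 * Worked example (assuming EM_TIDV is 64 hardware ticks): the interrupt
 * delay registers count in ~1.024us units, so E1000_TICKS_TO_USECS(64) =
 * (1024 * 64 + 500) / 1000 = 66 usecs, and E1000_USECS_TO_TICKS(66) =
 * (1000 * 66 + 512) / 1024 = 64 ticks again.  The +500/+512 terms just
 * round to the nearest unit instead of truncating.
 */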
static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
static int em_int_throttle_ceil = 10000;
static int em_rxd = EM_DEFAULT_RXD;
static int em_txd = EM_DEFAULT_TXD;
static int em_smart_pwr_down = FALSE;

TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
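/*
 * Usage note (sketch): these TUNABLE_INT() knobs are read when the module
 * is loaded, so they are typically set from the boot loader configuration,
 * e.g. "hw.em.rxd=512" or "hw.em.int_throttle_ceil=6000" in loader.conf,
 * rather than adjusted at runtime.
 */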
/*
 * Kernel trace for characterization of operations
 */
#if !defined(KTR_IF_EM)
#define KTR_IF_EM	KTR_ALL
#endif
KTR_INFO_MASTER(if_em);
KTR_INFO(KTR_IF_EM, if_em, intr_beg, 0, "intr begin", 0);
KTR_INFO(KTR_IF_EM, if_em, intr_end, 1, "intr end", 0);
KTR_INFO(KTR_IF_EM, if_em, pkt_receive, 4, "rx packet", 0);
KTR_INFO(KTR_IF_EM, if_em, pkt_txqueue, 5, "tx packet", 0);
KTR_INFO(KTR_IF_EM, if_em, pkt_txclean, 6, "tx clean", 0);
#define logif(name)	KTR_LOG(if_em_ ## name)
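/*
 * Example: logif(pkt_receive) expands to KTR_LOG(if_em_pkt_receive) and
 * records an "rx packet" event in the kernel trace buffer whenever
 * KTR_IF_EM tracing is enabled; the trace can later be inspected with
 * ktrdump(8).
 */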
/*********************************************************************
 *  Device identification routine
 *
 *  em_probe determines if the driver should be loaded on
 *  adapter based on PCI vendor/device id of the adapter.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
em_probe(device_t dev)
{
	em_vendor_info_t *ent;

	uint16_t pci_vendor_id = 0;
	uint16_t pci_device_id = 0;
	uint16_t pci_subvendor_id = 0;
	uint16_t pci_subdevice_id = 0;
	char adapter_name[60];

	INIT_DEBUGOUT("em_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != EM_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = em_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == PCI_ANY_ID)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == PCI_ANY_ID))) {
			ksnprintf(adapter_name, sizeof(adapter_name),
				  "%s, Version - %s", em_strings[ent->index],
				  em_driver_version);
			device_set_desc_copy(dev, adapter_name);
			device_set_async_attach(dev, TRUE);
			return (0);
		}
		ent++;
	}

	return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
em_attach(device_t dev)
{
	struct adapter *adapter;
	int tsize, rsize;
	int error = 0;

	INIT_DEBUGOUT("em_attach: begin");
	adapter = device_get_softc(dev);

	callout_init(&adapter->timer);
	callout_init(&adapter->tx_fifo_timer);

	adapter->dev = dev;
	adapter->osdep.dev = dev;

	/* SYSCTL stuff */
	sysctl_ctx_init(&adapter->sysctl_ctx);
	adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
					       SYSCTL_STATIC_CHILDREN(_hw),
					       OID_AUTO,
					       device_get_nameunit(dev),
					       CTLFLAG_RD, 0, "");
	if (adapter->sysctl_tree == NULL) {
		device_printf(dev, "Unable to create sysctl tree\n");
		return (ENOMEM);
	}

	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
			SYSCTL_CHILDREN(adapter->sysctl_tree),
			OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
			(void *)adapter, 0,
			em_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
			SYSCTL_CHILDREN(adapter->sysctl_tree),
			OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
			(void *)adapter, 0,
			em_sysctl_stats, "I", "Statistics");

	/* Determine hardware revision */
	em_identify_hardware(adapter);
	/* Set up some sysctls for the tunable interrupt delays */
	em_add_int_delay_sysctl(adapter, "rx_int_delay",
				"receive interrupt delay in usecs",
				&adapter->rx_int_delay,
				E1000_REG_OFFSET(&adapter->hw, RDTR),
				em_rx_int_delay_dflt);
	em_add_int_delay_sysctl(adapter, "tx_int_delay",
				"transmit interrupt delay in usecs",
				&adapter->tx_int_delay,
				E1000_REG_OFFSET(&adapter->hw, TIDV),
				em_tx_int_delay_dflt);
	if (adapter->hw.mac_type >= em_82540) {
		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
				"receive interrupt delay limit in usecs",
				&adapter->rx_abs_int_delay,
				E1000_REG_OFFSET(&adapter->hw, RADV),
				em_rx_abs_int_delay_dflt);
		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
				"transmit interrupt delay limit in usecs",
				&adapter->tx_abs_int_delay,
				E1000_REG_OFFSET(&adapter->hw, TADV),
				em_tx_abs_int_delay_dflt);
		SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
				SYSCTL_CHILDREN(adapter->sysctl_tree),
				OID_AUTO, "int_throttle_ceil",
				CTLTYPE_INT|CTLFLAG_RW,
				adapter, 0, em_sysctl_int_throttle, "I", NULL);
	}
	/*
	 * Validate number of transmit and receive descriptors. It
	 * must not exceed hardware maximum, and must be multiple
	 * of EM_DBA_ALIGN.
	 */
	if (((em_txd * sizeof(struct em_tx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
	    (em_txd < EM_MIN_TXD)) {
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
			      EM_DEFAULT_TXD, em_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else {
		adapter->num_tx_desc = em_txd;
	}

	if (((em_rxd * sizeof(struct em_rx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
	    (em_rxd < EM_MIN_RXD)) {
		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
			      EM_DEFAULT_RXD, em_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else {
		adapter->num_rx_desc = em_rxd;
	}

	SYSCTL_ADD_INT(&adapter->sysctl_ctx,
		       SYSCTL_CHILDREN(adapter->sysctl_tree), OID_AUTO, "rxd",
		       CTLFLAG_RD, &adapter->num_rx_desc, 0, NULL);
	SYSCTL_ADD_INT(&adapter->sysctl_ctx,
		       SYSCTL_CHILDREN(adapter->sysctl_tree), OID_AUTO, "txd",
		       CTLFLAG_RD, &adapter->num_tx_desc, 0, NULL);
	adapter->hw.autoneg = DO_AUTO_NEG;
	adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
	adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	adapter->hw.tbi_compatibility_en = TRUE;
	adapter->rx_buffer_len = EM_RXBUFFER_2048;

	adapter->hw.phy_init_script = 1;
	adapter->hw.phy_reset_disable = FALSE;

#ifndef EM_MASTER_SLAVE
	adapter->hw.master_slave = em_ms_hw_default;
#else
	adapter->hw.master_slave = EM_MASTER_SLAVE;
#endif

	/*
	 * Set the max frame size assuming standard ethernet
	 * sized frames.
	 */
	adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	adapter->hw.min_frame_size =
	    MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.report_tx_early = 1;

	error = em_allocate_pci_resources(dev);
	if (error)
		goto fail;
	/* Initialize eeprom parameters */
	em_init_eeprom_params(&adapter->hw);

	tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
			 EM_DBA_ALIGN);

	/* Allocate Transmit Descriptor ring */
	error = em_dma_malloc(adapter, tsize, &adapter->txdma);
	if (error) {
		device_printf(dev, "Unable to allocate TxDescriptor memory\n");
		goto fail;
	}
	adapter->tx_desc_base = (struct em_tx_desc *)adapter->txdma.dma_vaddr;

	rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
			 EM_DBA_ALIGN);

	/* Allocate Receive Descriptor ring */
	error = em_dma_malloc(adapter, rsize, &adapter->rxdma);
	if (error) {
		device_printf(dev, "Unable to allocate rx_desc memory\n");
		goto fail;
	}
	adapter->rx_desc_base = (struct em_rx_desc *)adapter->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		error = EIO;
		goto fail;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (em_read_mac_addr(&adapter->hw) < 0) {
		device_printf(dev,
			      "EEPROM read error while reading MAC address\n");
		error = EIO;
		goto fail;
	}

	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}
	/* Setup OS specific network interface */
	em_setup_interface(dev, adapter);

	/* Initialize statistics */
	em_clear_hw_cntrs(&adapter->hw);
	em_update_stats_counters(adapter);
	adapter->hw.get_link_status = 1;
	em_update_link_status(adapter);

	/* Indicate SOL/IDER usage */
	if (em_check_phy_reset_block(&adapter->hw)) {
		device_printf(dev, "PHY reset is blocked due to "
			      "SOL/IDER session.\n");
	}

	/* Identify 82544 on PCIX */
	em_get_bus_info(&adapter->hw);
	if (adapter->hw.bus_type == em_bus_type_pcix &&
	    adapter->hw.mac_type == em_82544)
		adapter->pcix_82544 = TRUE;
	else
		adapter->pcix_82544 = FALSE;

	error = bus_setup_intr(dev, adapter->res_interrupt, INTR_NETSAFE,
			       em_intr, adapter,
			       &adapter->int_handler_tag,
			       adapter->interface_data.ac_if.if_serializer);
	if (error) {
		device_printf(dev, "Error registering interrupt handler!\n");
		ether_ifdetach(&adapter->interface_data.ac_if);
		goto fail;
	}

	INIT_DEBUGOUT("em_attach: end");
	return (0);

fail:
	em_detach(dev);
	return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
em_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	INIT_DEBUGOUT("em_detach: begin");

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &adapter->interface_data.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		adapter->in_detach = 1;
		em_stop(adapter);
		em_phy_hw_reset(&adapter->hw);
		bus_teardown_intr(dev, adapter->res_interrupt,
				  adapter->int_handler_tag);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}
	bus_generic_detach(dev);

	em_free_pci_resources(dev);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base != NULL) {
		em_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base != NULL) {
		em_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	/* Free sysctl tree */
	if (adapter->sysctl_tree != NULL) {
		adapter->sysctl_tree = NULL;
		sysctl_ctx_free(&adapter->sysctl_ctx);
	}

	return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
em_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = &adapter->interface_data.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	em_stop(adapter);
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}
/*
 * Suspend/resume device methods.
 */
static int
em_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = &adapter->interface_data.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	em_stop(adapter);
	lwkt_serialize_exit(ifp->if_serializer);

	return bus_generic_suspend(dev);
}

static int
em_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = &adapter->interface_data.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	ifp->if_flags &= ~IFF_RUNNING;
	em_init(adapter);
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
		em_start(ifp);
	lwkt_serialize_exit(ifp->if_serializer);

	return bus_generic_resume(dev);
}
/*********************************************************************
 *  Transmit entry point
 *
 *  em_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/

static void
em_start(struct ifnet *ifp)
{
	struct mbuf *m_head;
	struct adapter *adapter = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;
	if (!adapter->link_active)
		return;
	while (!ifq_is_empty(&ifp->if_snd)) {
		m_head = ifq_poll(&ifp->if_snd);
		if (m_head == NULL)
			break;

		if (em_encap(adapter, m_head)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		ifq_dequeue(&ifp->if_snd, m_head);

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		ifp->if_timer = EM_TX_TIMEOUT;
	}
}
/*********************************************************************
 *  Ioctl entry point
 *
 *  em_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	int max_frame_size, mask, error = 0, reinit = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct adapter *adapter = ifp->if_softc;
	uint16_t eeprom_data = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (adapter->in_detach)
		return 0;

	switch (command) {
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
		switch (adapter->hw.mac_type) {
		case em_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			em_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3,
				       1, &eeprom_data);
			if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* Allow Jumbo frames */
			/* FALL THROUGH */
		case em_82571:
		case em_82572:
		case em_80003es2lan:	/* Limit Jumbo Frame size */
			max_frame_size = 9234;
			break;
		case em_ich8lan:
			/* ICH8 does not support jumbo frames */
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
			break;
		}
		if (ifr->ifr_mtu >
		    max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		adapter->hw.max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		ifp->if_flags &= ~IFF_RUNNING;
		em_init(adapter);
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS "
			       "(Set Interface Flags)");
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				em_init(adapter);
			} else if ((ifp->if_flags ^ adapter->if_flags) &
				   (IFF_PROMISC | IFF_ALLMULTI)) {
				em_disable_promisc(adapter);
				em_set_promisc(adapter);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				em_stop(adapter);
		}
		adapter->if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_flags & IFF_RUNNING) {
			em_disable_intr(adapter);
			em_set_multi(adapter);
			if (adapter->hw.mac_type == em_82542_rev2_0)
				em_initialize_receive_unit(adapter);
#ifdef DEVICE_POLLING
			/* Do not enable interrupt if polling(4) is enabled */
			if ((ifp->if_flags & IFF_POLLING) == 0)
#endif
				em_enable_intr(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (em_check_phy_reset_block(&adapter->hw)) {
			if_printf(ifp, "Media change is blocked due to "
				  "SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA "
			       "(Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (reinit && (ifp->if_flags & IFF_RUNNING)) {
			ifp->if_flags &= ~IFF_RUNNING;
			em_init(adapter);
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever hardware quits transmitting.
 *
 **********************************************************************/

static void
em_watchdog(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
		ifp->if_timer = EM_TX_TIMEOUT;
		return;
	}

	if (em_check_for_link(&adapter->hw) == 0)
		if_printf(ifp, "watchdog timeout -- resetting\n");

	ifp->if_flags &= ~IFF_RUNNING;
	em_init(adapter);

	adapter->watchdog_timeouts++;
}
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static void
em_init(void *arg)
{
	struct adapter *adapter = arg;
	uint32_t pba;
	struct ifnet *ifp = &adapter->interface_data.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	INIT_DEBUGOUT("em_init: begin");

	if (ifp->if_flags & IFF_RUNNING)
		return;

	em_stop(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac_type) {
	case em_82547:
	case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	/* Total Packet Buffer on these is 48K */
	case em_82571:
	case em_82572:
	case em_80003es2lan:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case em_82573: /* 82573: Total Packet Buffer is 32K */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case em_ich8lan:
	case em_ich9lan:
#define E1000_PBA_10K	0x000A
		pba = E1000_PBA_10K;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
		break;
	}
	INIT_DEBUGOUT1("em_init: pba=%dK", pba);
	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* Get the latest mac address, User can use a LAA */
	bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
	      ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		if_printf(ifp, "Unable to initialize the hardware\n");
		return;
	}
	em_update_link_status(adapter);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_vlans(adapter);

	/* Set hardware offload abilities */
	if (adapter->hw.mac_type >= em_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
	}

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(adapter)) {
		if_printf(ifp, "Could not setup transmit structures\n");
		em_stop(adapter);
		return;
	}
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		if_printf(ifp, "Could not setup receive structures\n");
		em_stop(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	em_clear_hw_cntrs(&adapter->hw);

#ifdef DEVICE_POLLING
	/* Do not enable interrupt if polling(4) is enabled */
	if (ifp->if_flags & IFF_POLLING)
		em_disable_intr(adapter);
	else
#endif
	em_enable_intr(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy_reset_disable = TRUE;
}
#ifdef DEVICE_POLLING

static void
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case POLL_REGISTER:
		em_disable_intr(adapter);
		break;
	case POLL_DEREGISTER:
		em_enable_intr(adapter);
		break;
	case POLL_AND_CHECK_STATUS:
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_update_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer,
				      adapter);
		}
		/* FALL THROUGH */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			em_rxeof(adapter, count);
			em_txeof(adapter);

			if (!ifq_is_empty(&ifp->if_snd))
				em_start(ifp);
		}
		break;
	}
}

#endif /* DEVICE_POLLING */
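/*
 * Note (summary, not original text): when polling(4) claims the interface,
 * em_poll() takes over the receive/transmit cleanup work normally done in
 * em_intr(); the POLL_REGISTER/POLL_DEREGISTER cases above simply gate the
 * hardware interrupt, which is why em_init() and the multicast ioctl path
 * check IFF_POLLING before re-enabling interrupts.
 */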
/*********************************************************************
 *
 *  Interrupt Service routine
 *
 *********************************************************************/

static void
em_intr(void *arg)
{
	uint32_t reg_icr;
	struct ifnet *ifp;
	struct adapter *adapter = arg;

	ifp = &adapter->interface_data.ac_if;

	logif(intr_beg);
	ASSERT_SERIALIZED(ifp->if_serializer);

	reg_icr = E1000_READ_REG(&adapter->hw, ICR);
	if ((adapter->hw.mac_type >= em_82571 &&
	     (reg_icr & E1000_ICR_INT_ASSERTED) == 0) ||
	    reg_icr == 0) {
		logif(intr_end);
		return;
	}

	/*
	 * XXX: some laptops trigger several spurious interrupts on em(4)
	 * when in the resume cycle. The ICR register reports all-ones
	 * value in this case. Processing such interrupts would lead to
	 * a freeze. I don't know why.
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

	/*
	 * note: do not attempt to improve efficiency by looping.  This
	 * only results in unnecessary piecemeal collection of received
	 * packets and unnecessary piecemeal cleanups of the transmit ring.
	 */
	if (ifp->if_flags & IFF_RUNNING) {
		em_rxeof(adapter, -1);
		em_txeof(adapter);
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&adapter->timer);
		adapter->hw.get_link_status = 1;
		em_check_for_link(&adapter->hw);
		em_update_link_status(adapter);
		callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	}

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;

	if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
		em_start(ifp);

	logif(intr_end);
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/

static void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;

	INIT_DEBUGOUT("em_media_status: begin");

	ASSERT_SERIALIZED(ifp->if_serializer);

	em_check_for_link(&adapter->hw);
	em_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (adapter->hw.media_type == em_media_type_fiber ||
	    adapter->hw.media_type == em_media_type_internal_serdes) {
		if (adapter->hw.mac_type == em_82545)
			fiber_type = IFM_1000_LX;
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		switch (adapter->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (adapter->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/

static int
em_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("em_media_change: begin");

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.autoneg = DO_AUTO_NEG;
		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		adapter->hw.autoneg = DO_AUTO_NEG;
		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		adapter->hw.autoneg = FALSE;
		adapter->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.forced_speed_duplex = em_100_full;
		else
			adapter->hw.forced_speed_duplex = em_100_half;
		break;
	case IFM_10_T:
		adapter->hw.autoneg = FALSE;
		adapter->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.forced_speed_duplex = em_10_full;
		else
			adapter->hw.forced_speed_duplex = em_10_half;
		break;
	default:
		if_printf(ifp, "Unsupported media type\n");
		break;
	}
	/*
	 * As the speed/duplex settings may have changed we need to
	 * reset the PHY.
	 */
	adapter->hw.phy_reset_disable = FALSE;

	ifp->if_flags &= ~IFF_RUNNING;
	em_init(adapter);

	return (0);
}
static void
em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
	 int error)
{
	struct em_q *q = arg;

	if (error)
		return;
	KASSERT(nsegs <= EM_MAX_SCATTER,
		("Too many DMA segments returned when mapping tx packet"));
	q->nsegs = nsegs;
	bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
}
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
em_encap(struct adapter *adapter, struct mbuf *m_head)
{
	uint32_t txd_upper = 0, txd_lower = 0, txd_used = 0, txd_saved = 0;
	int i, j, error, last = 0;

	struct em_q q;
	struct em_buffer *tx_buffer = NULL, *tx_buffer_first;
	bus_dmamap_t map;
	struct em_tx_desc *current_tx_desc = NULL;
	struct ifnet *ifp = &adapter->interface_data.ac_if;

	/*
	 * Force a cleanup if number of TX descriptors
	 * available hits the threshold
	 */
	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		em_txeof(adapter);
		if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
			adapter->no_tx_desc_avail1++;
			return (ENOBUFS);
		}
	}

	/*
	 * Capture the first descriptor index, this descriptor will have
	 * the index of the EOP which is the only one that now gets a
	 * DONE bit writeback.
	 */
	tx_buffer_first = &adapter->tx_buffer_area[adapter->next_avail_tx_desc];

	/*
	 * Map the packet for DMA.
	 */
	map = tx_buffer_first->map;
	error = bus_dmamap_load_mbuf(adapter->txtag, map, m_head, em_tx_cb,
				     &q, BUS_DMA_NOWAIT);
	if (error != 0) {
		adapter->no_tx_dma_setup++;
		return (error);
	}
	KASSERT(q.nsegs != 0, ("em_encap: empty packet"));

	if (q.nsegs > (adapter->num_tx_desc_avail - 2)) {
		adapter->no_tx_desc_avail2++;
		error = ENOBUFS;
		goto fail;
	}

	if (ifp->if_hwassist > 0) {
		em_transmit_checksum_setup(adapter, m_head,
					   &txd_upper, &txd_lower);
	}

	i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544)
		txd_saved = i;

	/* Set up our transmit descriptors */
	for (j = 0; j < q.nsegs; j++) {
		/* If adapter is 82544 and on PCIX bus */
		if (adapter->pcix_82544) {
			DESC_ARRAY desc_array;
			uint32_t array_elements, counter;

			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_fill_descriptors(q.segs[j].ds_addr,
						q.segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					error = ENOBUFS;
					goto fail;
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				current_tx_desc = &adapter->tx_desc_base[i];
				current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
				current_tx_desc->lower.data = htole32(
					adapter->txd_cmd | txd_lower |
					(uint16_t)desc_array.descriptor[counter].length);
				current_tx_desc->upper.data = htole32(txd_upper);

				last = i;
				if (++i == adapter->num_tx_desc)
					i = 0;

				tx_buffer->m_head = NULL;
				tx_buffer->next_eop = -1;
				txd_used++;
			}
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			current_tx_desc = &adapter->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(q.segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				adapter->txd_cmd | txd_lower | q.segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);

			last = i;
			if (++i == adapter->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
			tx_buffer->next_eop = -1;
		}
	}

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544)
		adapter->num_tx_desc_avail -= txd_used;
	else
		adapter->num_tx_desc_avail -= q.nsegs;

	/* Find out if we are in vlan mode */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id */
		current_tx_desc->upper.fields.special =
			htole16(m_head->m_pkthdr.ether_vlantag);

		/* Tell hardware to add tag */
		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	tx_buffer->m_head = m_head;
	tx_buffer_first->map = tx_buffer->map;
	tx_buffer->map = map;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 * and Report Status (RS)
	 */
	current_tx_desc->lower.data |=
		htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);

	/*
	 * Keep track in the first buffer which descriptor will be
	 * written back.
	 */
	tx_buffer_first->next_eop = last;

	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
			BUS_DMASYNC_PREWRITE);

	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
	 * that this frame is available to transmit.
	 */
	if (adapter->hw.mac_type == em_82547 &&
	    adapter->link_duplex == HALF_DUPLEX) {
		em_82547_move_tail_serialized(adapter);
	} else {
		E1000_WRITE_REG(&adapter->hw, TDT, i);
		if (adapter->hw.mac_type == em_82547) {
			em_82547_update_fifo_head(adapter,
						  m_head->m_pkthdr.len);
		}
	}

	return (0);
fail:
	bus_dmamap_unload(adapter->txtag, map);
	return (error);
}
/*********************************************************************
 *
 *  82547 workaround to avoid controller hang in half-duplex environment.
 *  The workaround is to avoid queuing a large packet that would span
 *  the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
 *  in this case. We do that only when FIFO is quiescent.
 *
 **********************************************************************/
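/*
 * Numeric sketch (illustrative values, not taken from the original code):
 * assuming a 16-byte EM_FIFO_HDR, a 1514-byte frame consumes
 * roundup2(1514 + 16, 16) = 1536 bytes of FIFO space.  When that padded
 * length exceeds the space left before the FIFO wraps by at least
 * EM_82547_PKT_THRESH, em_82547_fifo_workaround() attempts a FIFO reset,
 * which em_82547_tx_fifo_reset() only performs once the FIFO is quiescent;
 * otherwise the tail update is retried from the 1-tick callout below.
 */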
static void
em_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;
	struct ifnet *ifp = &adapter->interface_data.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	em_82547_move_tail_serialized(adapter);
	lwkt_serialize_exit(ifp->if_serializer);
}

static void
em_82547_move_tail_serialized(struct adapter *adapter)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			if (em_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					      em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
}
static int
em_82547_fifo_workaround(struct adapter *adapter, int len)
{
	int fifo_space, fifo_pkt_len;

	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);

	if (adapter->link_duplex == HALF_DUPLEX) {
		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;

		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
			if (em_82547_tx_fifo_reset(adapter))
				return (0);
			else
				return (1);
		}
	}

	return (0);
}
static void
em_82547_update_fifo_head(struct adapter *adapter, int len)
{
	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);

	/* tx_fifo_head is always 16 byte aligned */
	adapter->tx_fifo_head += fifo_pkt_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
}
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	if (E1000_READ_REG(&adapter->hw, TDT) ==
	    E1000_READ_REG(&adapter->hw, TDH) &&
	    E1000_READ_REG(&adapter->hw, TDFT) ==
	    E1000_READ_REG(&adapter->hw, TDFH) &&
	    E1000_READ_REG(&adapter->hw, TDFTS) ==
	    E1000_READ_REG(&adapter->hw, TDFHS) &&
	    E1000_READ_REG(&adapter->hw, TDFPC) == 0) {
		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, TCTL);
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, TDFT, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFH, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return (TRUE);
	} else {
		return (FALSE);
	}
}
static void
em_set_promisc(struct adapter *adapter)
{
	uint32_t reg_rctl;
	struct ifnet *ifp = &adapter->interface_data.ac_if;

	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);

	adapter->em_insert_vlan_header = 0;
	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

		/*
		 * Disable VLAN stripping in promiscuous mode.
		 * This enables bridging of vlan tagged frames to occur
		 * and also allows vlan tags to be seen in tcpdump.
		 */
		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
			em_disable_vlans(adapter);
		adapter->em_insert_vlan_header = 1;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	}
}
static void
em_disable_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->interface_data.ac_if;
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);

	reg_rctl &= (~E1000_RCTL_UPE);
	reg_rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_vlans(adapter);
	adapter->em_insert_vlan_header = 0;
}
/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast address list is updated.
 *
 **********************************************************************/

static void
em_set_multi(struct adapter *adapter)
{
	uint32_t reg_rctl = 0;
	uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = &adapter->interface_data.ac_if;

	IOCTL_DEBUGOUT("em_set_multi: begin");

	if (adapter->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		msec_delay(5);
	}

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	} else {
		em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
	}

	if (adapter->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_set_mwi(&adapter->hw);
	}
}
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

static void
em_local_timer(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;

	ifp = &adapter->interface_data.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	em_check_for_link(&adapter->hw);
	em_update_link_status(adapter);
	em_update_stats_counters(adapter);
	if (em_display_debug_stats && ifp->if_flags & IFF_RUNNING)
		em_print_hw_stats(adapter);
	em_smartspeed(adapter);

	callout_reset(&adapter->timer, hz, em_local_timer, adapter);

	lwkt_serialize_exit(ifp->if_serializer);
}
static void
em_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp;

	ifp = &adapter->interface_data.ac_if;

	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
		if (adapter->link_active == 0) {
			em_get_speed_and_duplex(&adapter->hw,
						&adapter->link_speed,
						&adapter->link_duplex);
			/* Check if we may set SPEED_MODE bit on PCI-E */
			if (adapter->link_speed == SPEED_1000 &&
			    (adapter->hw.mac_type == em_82571 ||
			     adapter->hw.mac_type == em_82572)) {
				uint32_t tarc0;

				tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
				tarc0 |= SPEED_MODE_BIT;
				E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
			}
			if_printf(&adapter->interface_data.ac_if,
				  "Link is up %d Mbps %s\n",
				  adapter->link_speed,
				  adapter->link_duplex == FULL_DUPLEX ?
				  "Full Duplex" : "Half Duplex");
			adapter->link_active = 1;
			adapter->smartspeed = 0;
			ifp->if_baudrate = adapter->link_speed * 1000000;
			ifp->if_link_state = LINK_STATE_UP;
			if_link_state_change(ifp);
		}
	} else {
		if (adapter->link_active == 1) {
			ifp->if_baudrate = 0;
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			if_printf(&adapter->interface_data.ac_if,
				  "Link is Down\n");
			adapter->link_active = 0;
			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
em_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;

	ifp = &adapter->interface_data.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	INIT_DEBUGOUT("em_stop: begin");
	em_disable_intr(adapter);
	em_reset_hw(&adapter->hw);
	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);
	em_free_transmit_structures(adapter);
	em_free_receive_structures(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/

static void
em_identify_hardware(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	/* Make sure our PCI config space has the necessary stuff set */
	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
		device_printf(dev, "Memory Access and/or Bus Master bits "
			      "were not set!\n");
		adapter->hw.pci_cmd_word |= PCIM_CMD_BUSMASTEREN |
					    PCIM_CMD_MEMEN;
		pci_write_config(dev, PCIR_COMMAND,
				 adapter->hw.pci_cmd_word, 2);
	}

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_get_revid(dev);
	adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
	adapter->hw.subsystem_id = pci_get_subdevice(dev);

	/* Identify the MAC */
	if (em_set_mac_type(&adapter->hw))
		device_printf(dev, "Unknown MAC Type\n");

	if (adapter->hw.mac_type == em_82541 ||
	    adapter->hw.mac_type == em_82541_rev_2 ||
	    adapter->hw.mac_type == em_82547 ||
	    adapter->hw.mac_type == em_82547_rev_2)
		adapter->hw.phy_init_script = TRUE;
}

static int
em_allocate_pci_resources(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	int rid;

	rid = PCIR_BAR(0);
	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						     &rid, RF_ACTIVE);
	if (adapter->res_memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
		rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
		rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;

	if (adapter->hw.mac_type > em_82543) {
		/* Figure our where our IO BAR is ? */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			uint32_t val;

			val = pci_read_config(dev, rid, 4);
			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			device_printf(dev, "Unable to locate IO BAR\n");
			return (ENXIO);
		}

		adapter->res_ioport = bus_alloc_resource_any(dev,
		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
		if (!(adapter->res_ioport)) {
			device_printf(dev, "Unable to allocate bus resource: "
				      "ioport\n");
			return (ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
			rman_get_bustag(adapter->res_ioport);
		adapter->osdep.io_bus_space_handle =
			rman_get_bushandle(adapter->res_ioport);
	}

	/* For ICH8 we need to find the flash memory. */
	if ((adapter->hw.mac_type == em_ich8lan) ||
	    (adapter->hw.mac_type == em_ich9lan)) {
		rid = EM_FLASH;
		adapter->flash_mem = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
		if (adapter->flash_mem == NULL) {
			device_printf(dev, "Unable to allocate bus resource: "
				      "flash memory\n");
			return (ENXIO);
		}
		adapter->osdep.flash_bus_space_tag =
			rman_get_bustag(adapter->flash_mem);
		adapter->osdep.flash_bus_space_handle =
			rman_get_bushandle(adapter->flash_mem);
	}

	rid = 0;
	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res_interrupt == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
			      "interrupt\n");
		return (ENXIO);
	}

	adapter->hw.back = &adapter->osdep;

	return (0);
}

static void
em_free_pci_resources(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	if (adapter->res_interrupt != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, 0,
				     adapter->res_interrupt);
	}
	if (adapter->res_memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
				     adapter->res_memory);
	}
	if (adapter->res_ioport != NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
				     adapter->res_ioport);
	}
	if (adapter->flash_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, EM_FLASH,
				     adapter->flash_mem);
	}
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  adapter structure. The controller is reset, the EEPROM is
 *  verified, the MAC address is set, then the shared initialization
 *  routines are called.
 *
 **********************************************************************/
static int
em_hardware_init(struct adapter *adapter)
{
	uint16_t rx_buffer_size;

	INIT_DEBUGOUT("em_hardware_init: begin");
	/* Issue a global reset */
	em_reset_hw(&adapter->hw);

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/* Make sure we have a good EEPROM before we read from it */
	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
		if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
			device_printf(adapter->dev,
				      "The EEPROM Checksum Is Not Valid\n");
			return (EIO);
		}
	}

	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
		device_printf(adapter->dev,
			      "EEPROM read error while reading part number\n");
		return (EIO);
	}

	/* Set up smart power down as default off on newer adapters. */
	if (!em_smart_pwr_down &&
	    (adapter->hw.mac_type == em_82571 ||
	     adapter->hw.mac_type == em_82572)) {
		uint16_t phy_tmp = 0;

		/* Speed up time to link by disabling smart power down. */
		em_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
				&phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		em_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
				 phy_tmp);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit.  Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer.  There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10);

	adapter->hw.fc_high_water =
	    rx_buffer_size - roundup2(adapter->hw.max_frame_size, 1024);
	adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
	if (adapter->hw.mac_type == em_80003es2lan)
		adapter->hw.fc_pause_time = 0xFFFF;
	else
		adapter->hw.fc_pause_time = 1000;
	adapter->hw.fc_send_xon = TRUE;
	adapter->hw.fc = E1000_FC_FULL;

	if (em_init_hw(&adapter->hw) < 0) {
		device_printf(adapter->dev, "Hardware Initialization Failed");
		return (EIO);
	}

	em_check_for_link(&adapter->hw);

	return (0);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
em_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;
	u_char fiber_type = IFM_1000_SX;	/* default type */
	INIT_DEBUGOUT("em_setup_interface: begin");

	ifp = &adapter->interface_data.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 1000000000;
	ifp->if_init = em_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = em_poll;
#endif
	ifp->if_watchdog = em_watchdog;
	ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);

	if (adapter->hw.mac_type >= em_82543)
		ifp->if_capabilities |= IFCAP_HWCSUM;

	ifp->if_capenable = ifp->if_capabilities;

	ether_ifattach(ifp, adapter->hw.mac_addr, NULL);

#ifdef PROFILE_SERIALIZER
	SYSCTL_ADD_UINT(&adapter->sysctl_ctx,
			SYSCTL_CHILDREN(adapter->sysctl_tree), OID_AUTO,
			"serializer_sleep", CTLFLAG_RW,
			&ifp->if_serializer->sleep_cnt, 0, NULL);
	SYSCTL_ADD_UINT(&adapter->sysctl_ctx,
			SYSCTL_CHILDREN(adapter->sysctl_tree), OID_AUTO,
			"serializer_tryfail", CTLFLAG_RW,
			&ifp->if_serializer->tryfail_cnt, 0, NULL);
	SYSCTL_ADD_UINT(&adapter->sysctl_ctx,
			SYSCTL_CHILDREN(adapter->sysctl_tree), OID_AUTO,
			"serializer_enter", CTLFLAG_RW,
			&ifp->if_serializer->enter_cnt, 0, NULL);
	SYSCTL_ADD_UINT(&adapter->sysctl_ctx,
			SYSCTL_CHILDREN(adapter->sysctl_tree), OID_AUTO,
			"serializer_try", CTLFLAG_RW,
			&ifp->if_serializer->try_cnt, 0, NULL);
#endif

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
		     em_media_status);
	if (adapter->hw.media_type == em_media_type_fiber ||
	    adapter->hw.media_type == em_media_type_internal_serdes) {
		if (adapter->hw.mac_type == em_82545)
			fiber_type = IFM_1000_LX;
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
	} else {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
}

/*********************************************************************
 *
 *  Workaround for SmartSpeed on 82541 and 82547 controllers
 *
 **********************************************************************/
static void
em_smartspeed(struct adapter *adapter)
{
	uint16_t phy_tmp;

	if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
	    !adapter->hw.autoneg ||
	    !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/*
		 * If Master/Slave config fault is asserted twice,
		 * we assume back-to-back.
		 */
		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
			if (phy_tmp & CR_1000T_MS_ENABLE) {
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				em_write_phy_reg(&adapter->hw,
						 PHY_1000T_CTRL, phy_tmp);
				adapter->smartspeed++;
				if (adapter->hw.autoneg &&
				    !em_phy_setup_autoneg(&adapter->hw) &&
				    !em_read_phy_reg(&adapter->hw, PHY_CTRL,
						     &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					em_write_phy_reg(&adapter->hw,
							 PHY_CTRL, phy_tmp);
				}
			}
		}
		return;
	} else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		if (adapter->hw.autoneg &&
		    !em_phy_setup_autoneg(&adapter->hw) &&
		    !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}

/*
 * Manage DMA'able memory.
 */
static void
em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *)arg = segs->ds_addr;
}

static int
em_dma_malloc(struct adapter *adapter, bus_size_t size,
	      struct em_dma_alloc *dma)
{
	device_t dev = adapter->dev;
	int error;

	error = bus_dma_tag_create(NULL,		/* parent */
				   EM_DBA_ALIGN, 0,	/* alignment, bounds */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,		/* filter, filterarg */
				   size,		/* maxsize */
				   1,			/* nsegments */
				   size,		/* maxsegsize */
				   0,			/* flags */
				   &dma->dma_tag);
	if (error) {
		device_printf(dev, "%s: bus_dma_tag_create failed; error %d\n",
			      __func__, error);
		return error;
	}

	error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
				 BUS_DMA_WAITOK, &dma->dma_map);
	if (error) {
		device_printf(dev, "%s: bus_dmamem_alloc failed; "
			      "size %ju, error %d\n",
			      __func__, (uintmax_t)size, error);
		goto fail;
	}

	error = bus_dmamap_load(dma->dma_tag, dma->dma_map,
				dma->dma_vaddr, size,
				em_dmamap_cb, &dma->dma_paddr,
				BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "%s: bus_dmamap_load failed; error %d\n",
			      __func__, error);
		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
		goto fail;
	}

	return 0;

fail:
	bus_dma_tag_destroy(dma->dma_tag);
	dma->dma_tag = NULL;
	return error;
}

static void
em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
{
	if (dma->dma_tag != NULL) {
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
		bus_dma_tag_destroy(dma->dma_tag);
		dma->dma_tag = NULL;
	}
}
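
/*
 * Illustrative sketch (compiled out, not part of the driver): how a caller
 * would typically pair em_dma_malloc()/em_dma_free() to back a descriptor
 * ring.  The ring size of 256 descriptors below is only an example; the
 * driver's attach path sizes the ring from its tunables.
 */
#if 0
static int
em_example_alloc_tx_ring(struct adapter *adapter)
{
	bus_size_t size = 256 * sizeof(struct em_tx_desc);

	/* Allocate bus-visible, contiguous memory for the Tx ring. */
	if (em_dma_malloc(adapter, size, &adapter->txdma) != 0)
		return (ENOMEM);
	adapter->tx_desc_base = (struct em_tx_desc *)adapter->txdma.dma_vaddr;
	return (0);
}

static void
em_example_free_tx_ring(struct adapter *adapter)
{
	/* Unload, free and destroy everything em_dma_malloc() set up. */
	em_dma_free(adapter, &adapter->txdma);
	adapter->tx_desc_base = NULL;
}
#endif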

/*********************************************************************
 *
 *  Allocate and initialize transmit structures.
 *
 **********************************************************************/
static int
em_setup_transmit_structures(struct adapter *adapter)
{
	struct em_buffer *tx_buffer;
	bus_size_t size;
	int error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       size,			/* maxsize */
			       EM_MAX_SCATTER,		/* nsegments */
			       size,			/* maxsegsize */
			       0,			/* flags */
			       &adapter->txtag)) {
		device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
		return (ENOMEM);
	}

	adapter->tx_buffer_area =
		kmalloc(sizeof(struct em_buffer) * adapter->num_tx_desc,
			M_DEVBUF, M_WAITOK | M_ZERO);

	bzero(adapter->tx_desc_base,
	      sizeof(struct em_tx_desc) * adapter->num_tx_desc);
	tx_buffer = adapter->tx_buffer_area;
	for (i = 0; i < adapter->num_tx_desc; i++) {
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error) {
			device_printf(adapter->dev,
				      "Unable to create TX DMA map\n");
			goto fail;
		}
		tx_buffer++;
	}

	adapter->next_avail_tx_desc = 0;
	adapter->next_tx_to_clean = 0;

	/* Set number of descriptors available */
	adapter->num_tx_desc_avail = adapter->num_tx_desc;

	/* Set checksum context */
	adapter->active_checksum_context = OFFLOAD_NONE;

	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
			BUS_DMASYNC_PREWRITE);

	return (0);
fail:
	em_free_transmit_structures(adapter);
	return (error);
}

/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
em_initialize_transmit_unit(struct adapter *adapter)
{
	uint32_t reg_tctl;
	uint32_t reg_tipg = 0;
	uint64_t bus_addr;

	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");

	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, TDLEN,
			adapter->num_tx_desc * sizeof(struct em_tx_desc));
	E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, TDT, 0);
	E1000_WRITE_REG(&adapter->hw, TDH, 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     E1000_READ_REG(&adapter->hw, TDBAL),
		     E1000_READ_REG(&adapter->hw, TDLEN));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac_type) {
	case em_82542_rev2_0:
	case em_82542_rev2_1:
		reg_tipg = DEFAULT_82542_TIPG_IPGT;
		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	case em_80003es2lan:
		reg_tipg = DEFAULT_82543_TIPG_IPGR1;
		reg_tipg |=
		    DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		if (adapter->hw.media_type == em_media_type_fiber ||
		    adapter->hw.media_type == em_media_type_internal_serdes)
			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	}

	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
	if (adapter->hw.mac_type >= em_82540) {
		E1000_WRITE_REG(&adapter->hw, TADV,
				adapter->tx_abs_int_delay.value);
	}

	/* Program the Transmit Control Register */
	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	if (adapter->hw.mac_type >= em_82571)
		reg_tctl |= E1000_TCTL_MULR;
	if (adapter->link_duplex == 1)
		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	else
		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Base Settings */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS;

	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
}

/*********************************************************************
 *
 *  Free all transmit related data structures.
 *
 **********************************************************************/
static void
em_free_transmit_structures(struct adapter *adapter)
{
	struct em_buffer *tx_buffer;
	int i;

	INIT_DEBUGOUT("free_transmit_structures: begin");

	if (adapter->tx_buffer_area != NULL) {
		tx_buffer = adapter->tx_buffer_area;
		for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
			if (tx_buffer->m_head != NULL) {
				bus_dmamap_unload(adapter->txtag,
						  tx_buffer->map);
				m_freem(tx_buffer->m_head);
			}

			if (tx_buffer->map != NULL) {
				bus_dmamap_destroy(adapter->txtag,
						   tx_buffer->map);
				tx_buffer->map = NULL;
			}
			tx_buffer->m_head = NULL;
		}
	}
	if (adapter->tx_buffer_area != NULL) {
		kfree(adapter->tx_buffer_area, M_DEVBUF);
		adapter->tx_buffer_area = NULL;
	}
	if (adapter->txtag != NULL) {
		bus_dma_tag_destroy(adapter->txtag);
		adapter->txtag = NULL;
	}
}

/*********************************************************************
 *
 *  The offload context needs to be set when we transfer the first
 *  packet of a particular protocol (TCP/UDP).  We change the
 *  context only if the protocol type changes.
 *
 **********************************************************************/
static void
em_transmit_checksum_setup(struct adapter *adapter,
			   struct mbuf *mp,
			   uint32_t *txd_upper,
			   uint32_t *txd_lower)
{
	struct em_context_desc *TXD;
	struct em_buffer *tx_buffer;
	int curr_txd;

	if (mp->m_pkthdr.csum_flags) {
		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_TCP_IP;
		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			*txd_upper = 0;
			*txd_lower = 0;
			return;
		}
	} else {
		*txd_upper = 0;
		*txd_lower = 0;
		return;
	}

	/*
	 * If we reach this point, the checksum offload context
	 * needs to be reset.
	 */
	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct em_context_desc *)&adapter->tx_desc_base[curr_txd];

	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
	TXD->lower_setup.ip_fields.ipcso =
		ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
	TXD->lower_setup.ip_fields.ipcse =
		htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);

	TXD->upper_setup.tcp_fields.tucss =
		ETHER_HDR_LEN + sizeof(struct ip);
	TXD->upper_setup.tcp_fields.tucse = htole16(0);

	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
			ETHER_HDR_LEN + sizeof(struct ip) +
			offsetof(struct tcphdr, th_sum);
	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
			ETHER_HDR_LEN + sizeof(struct ip) +
			offsetof(struct udphdr, uh_sum);
	}

	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);

	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
}
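
/*
 * Worked example (illustrative only): for an untagged IPv4/TCP frame with a
 * 20-byte IP header, the context descriptor programmed above resolves to
 *   ipcss = 14                (IP checksumming starts after the Ethernet header)
 *   ipcso = 14 + 10 = 24      (offset of ip_sum within the frame)
 *   ipcse = 14 + 20 - 1 = 33  (last byte of the IP header)
 *   tucss = 14 + 20 = 34      (TCP header starts here)
 *   tucso = 34 + 16 = 50      (offset of th_sum within the frame)
 * assuming offsetof(struct ip, ip_sum) == 10 and
 * offsetof(struct tcphdr, th_sum) == 16, as is the case for the standard
 * BSD header layouts.
 */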

/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue.  If the hardware is done
 *  processing the packet then free associated resources.  The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
static void
em_txeof(struct adapter *adapter)
{
	int first, last, done, num_avail;
	struct em_buffer *tx_buffer;
	struct em_tx_desc *tx_desc, *eop_desc;
	struct ifnet *ifp = &adapter->interface_data.ac_if;

	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		return;

	num_avail = adapter->num_tx_desc_avail;
	first = adapter->next_tx_to_clean;
	tx_desc = &adapter->tx_desc_base[first];
	tx_buffer = &adapter->tx_buffer_area[first];
	last = tx_buffer->next_eop;
	KKASSERT(last >= 0 && last < adapter->num_tx_desc);
	eop_desc = &adapter->tx_desc_base[last];

	/*
	 * Now calculate the terminating index for the cleanup loop below
	 */
	if (++last == adapter->num_tx_desc)
		last = 0;
	done = last;

	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
			BUS_DMASYNC_POSTREAD);

	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			num_avail++;

			if (tx_buffer->m_head) {
				ifp->if_opackets++;
				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
						BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
						  tx_buffer->map);

				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
			}
			tx_buffer->next_eop = -1;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &adapter->tx_buffer_area[first];
			tx_desc = &adapter->tx_desc_base[first];
		}
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last == -1)
			break;
		KKASSERT(last >= 0 && last < adapter->num_tx_desc);
		eop_desc = &adapter->tx_desc_base[last];
		if (++last == adapter->num_tx_desc)
			last = 0;
		done = last;
	}

	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
			BUS_DMASYNC_PREWRITE);

	adapter->next_tx_to_clean = first;

	/*
	 * If we have enough room, clear IFF_OACTIVE to tell the stack
	 * that it is OK to send packets.
	 * If there are no pending descriptors, clear the timeout.  Otherwise,
	 * if some descriptors have been freed, restart the timeout.
	 */
	if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
		ifp->if_flags &= ~IFF_OACTIVE;
		if (num_avail == adapter->num_tx_desc)
			ifp->if_timer = 0;
		else if (num_avail == adapter->num_tx_desc_avail)
			ifp->if_timer = EM_TX_TIMEOUT;
	}
	adapter->num_tx_desc_avail = num_avail;
}

/*********************************************************************
 *
 *  Get a buffer from system mbuf buffer pool.
 *
 **********************************************************************/
static int
em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp, int how)
{
	struct mbuf *mp = nmp;
	struct em_buffer *rx_buffer;
	struct ifnet *ifp;
	bus_addr_t paddr;
	int error;

	ifp = &adapter->interface_data.ac_if;

	if (mp == NULL) {
		mp = m_getcl(how, MT_DATA, M_PKTHDR);
		if (mp == NULL) {
			adapter->mbuf_cluster_failed++;
			return (ENOBUFS);
		}
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
	} else {
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	if (ifp->if_mtu <= ETHERMTU)
		m_adj(mp, ETHER_ALIGN);

	rx_buffer = &adapter->rx_buffer_area[i];

	/*
	 * Using memory from the mbuf cluster pool, invoke the
	 * bus_dma machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
				mtod(mp, void *), mp->m_len,
				em_dmamap_cb, &paddr, 0);
	if (error) {
		m_freem(mp);
		return (error);
	}
	rx_buffer->m_head = mp;
	adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);

	return (0);
}

/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures.  Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
em_allocate_receive_structures(struct adapter *adapter)
{
	int i, error, size;
	struct em_buffer *rx_buffer;

	size = adapter->num_rx_desc * sizeof(struct em_buffer);
	adapter->rx_buffer_area = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(NULL,		/* parent */
				   1, 0,		/* alignment, bounds */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,		/* filter, filterarg */
				   MCLBYTES,		/* maxsize */
				   1,			/* nsegments */
				   MCLBYTES,		/* maxsegsize */
				   0,			/* flags */
				   &adapter->rxtag);
	if (error) {
		device_printf(adapter->dev, "%s: bus_dma_tag_create failed; "
			      "error %u\n", __func__, error);
		goto fail;
	}

	rx_buffer = adapter->rx_buffer_area;
	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
					  &rx_buffer->map);
		if (error) {
			device_printf(adapter->dev,
				      "%s: bus_dmamap_create failed; "
				      "error %u\n", __func__, error);
			goto fail;
		}
	}

	for (i = 0; i < adapter->num_rx_desc; i++) {
		error = em_get_buf(i, adapter, NULL, MB_DONTWAIT);
		if (error)
			goto fail;
	}

	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
			BUS_DMASYNC_PREWRITE);

	return (0);
fail:
	em_free_receive_structures(adapter);
	return (error);
}

/*********************************************************************
 *
 *  Allocate and initialize receive structures.
 *
 **********************************************************************/
static int
em_setup_receive_structures(struct adapter *adapter)
{
	int error;

	bzero(adapter->rx_desc_base,
	      sizeof(struct em_rx_desc) * adapter->num_rx_desc);

	error = em_allocate_receive_structures(adapter);
	if (error)
		return (error);

	/* Setup our descriptor pointers */
	adapter->next_rx_desc_to_check = 0;

	return (0);
}

/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
static void
em_initialize_receive_unit(struct adapter *adapter)
{
	uint32_t reg_rctl;
	uint32_t reg_rxcsum;
	struct ifnet *ifp;
	uint64_t bus_addr;

	INIT_DEBUGOUT("em_initialize_receive_unit: begin");

	ifp = &adapter->interface_data.ac_if;

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	E1000_WRITE_REG(&adapter->hw, RCTL, 0);

	/* Set the Receive Delay Timer Register */
	E1000_WRITE_REG(&adapter->hw, RDTR,
			adapter->rx_int_delay.value | E1000_RDT_FPDB);

	if (adapter->hw.mac_type >= em_82540) {
		E1000_WRITE_REG(&adapter->hw, RADV,
				adapter->rx_abs_int_delay.value);

		/* Set the interrupt throttling rate in 256ns increments */
		if (em_int_throttle_ceil) {
			E1000_WRITE_REG(&adapter->hw, ITR,
					1000000000 / 256 / em_int_throttle_ceil);
		} else {
			E1000_WRITE_REG(&adapter->hw, ITR, 0);
		}
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
			sizeof(struct em_rx_desc));
	E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);

	/* Setup the Receive Control Register */
	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (adapter->hw.tbi_compatibility_on == TRUE)
		reg_rctl |= E1000_RCTL_SBP;

	switch (adapter->rx_buffer_len) {
	default:
	case EM_RXBUFFER_2048:
		reg_rctl |= E1000_RCTL_SZ_2048;
		break;
	case EM_RXBUFFER_4096:
		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX |
			    E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_8192:
		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX |
			    E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_16384:
		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX |
			    E1000_RCTL_LPE;
		break;
	}

	if (ifp->if_mtu > ETHERMTU)
		reg_rctl |= E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac_type >= em_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
	}

#ifdef EM_X60_WORKAROUND
	if (adapter->hw.mac_type == em_82573)
		E1000_WRITE_REG(&adapter->hw, RDTR, 32);
#endif

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
}

/*********************************************************************
 *
 *  Free receive related data structures.
 *
 **********************************************************************/
static void
em_free_receive_structures(struct adapter *adapter)
{
	struct em_buffer *rx_buffer;
	int i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	if (adapter->rx_buffer_area != NULL) {
		rx_buffer = adapter->rx_buffer_area;
		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
			if (rx_buffer->m_head != NULL) {
				bus_dmamap_unload(adapter->rxtag,
						  rx_buffer->map);
				m_freem(rx_buffer->m_head);
				rx_buffer->m_head = NULL;
			}
			if (rx_buffer->map != NULL) {
				bus_dmamap_destroy(adapter->rxtag,
						   rx_buffer->map);
				rx_buffer->map = NULL;
			}
		}
	}
	if (adapter->rx_buffer_area != NULL) {
		kfree(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
	}
	if (adapter->rxtag != NULL) {
		bus_dma_tag_destroy(adapter->rxtag);
		adapter->rxtag = NULL;
	}
}

/*********************************************************************
 *
 *  This routine executes in interrupt context.  It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *********************************************************************/
static void
em_rxeof(struct adapter *adapter, int count)
{
	struct ifnet *ifp;
	struct mbuf *mp;
	uint8_t accept_frame = 0;
	uint8_t eop = 0;
	uint16_t len, desc_len, prev_len_adj;
	int i;
#ifdef ETHER_INPUT_CHAIN
	struct mbuf_chain chain[MAXCPU];
	int j;
#endif

	/* Pointer to the receive descriptor being examined. */
	struct em_rx_desc *current_desc;

	ifp = &adapter->interface_data.ac_if;
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];

	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
			BUS_DMASYNC_POSTREAD);

	if (!(current_desc->status & E1000_RXD_STAT_DD))
		return;

#ifdef ETHER_INPUT_CHAIN
	for (j = 0; j < ncpus; ++j)
		chain[j].mc_head = chain[j].mc_tail = NULL;
#endif

	while ((current_desc->status & E1000_RXD_STAT_DD) && count != 0) {
		mp = adapter->rx_buffer_area[i].m_head;
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(adapter->rxtag,
				  adapter->rx_buffer_area[i].map);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			count--;
			eop = 1;
			if (desc_len < ETHER_CRC_LEN) {
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			} else {
				len = desc_len - ETHER_CRC_LEN;
			}
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			uint8_t last_byte;
			uint32_t pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);

			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
				       current_desc->errors,
				       pkt_len, last_byte)) {
				em_tbi_adjust_stats(&adapter->hw,
						    &adapter->stats, pkt_len,
						    adapter->hw.mac_addr);
				if (len > 0)
					len--;
			} else {
				accept_frame = 0;
			}
		}

		if (accept_frame) {
			if (em_get_buf(i, adapter, NULL, MB_DONTWAIT) == ENOBUFS) {
				adapter->dropped_pkts++;
				em_get_buf(i, adapter, mp, MB_DONTWAIT);
				if (adapter->fmp != NULL)
					m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
				break;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp;	/* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				/*
				 * Adjust length of previous mbuf in chain if
				 * we received less than 4 bytes in the last
				 * descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -= prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;

				em_receive_checksum(adapter, current_desc,
						    adapter->fmp);
				if (current_desc->status & E1000_RXD_STAT_VP) {
					VLAN_INPUT_TAG(adapter->fmp,
						       (current_desc->special &
							E1000_RXD_SPC_VLAN_MASK));
				} else {
#ifdef ETHER_INPUT_CHAIN
					ether_input_chain(ifp, adapter->fmp,
							  chain);
#else
					ifp->if_input(ifp, adapter->fmp);
#endif
				}
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			adapter->dropped_pkts++;
			em_get_buf(i, adapter, mp, MB_DONTWAIT);
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptors status. */
		current_desc->status = 0;

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc) {
			i = 0;
			current_desc = adapter->rx_desc_base;
		} else {
			current_desc++;
		}
	}

#ifdef ETHER_INPUT_CHAIN
	ether_input_dispatch(chain);
#endif

	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
			BUS_DMASYNC_PREWRITE);

	adapter->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue #0 "Tail Pointer". */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, RDT, i);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
em_receive_checksum(struct adapter *adapter,
		    struct em_rx_desc *rx_desc,
		    struct mbuf *mp)
{
	/* 82543 or newer only */
	if ((adapter->hw.mac_type < em_82543) ||
	    /* Ignore Checksum bit is set */
	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
		mp->m_pkthdr.csum_flags = 0;
		return;
	}

	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		} else {
			mp->m_pkthdr.csum_flags = 0;
		}
	}

	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
			mp->m_pkthdr.csum_flags |=
				(CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
				 CSUM_FRAG_NOT_CHECKED);
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
}

static void
em_enable_vlans(struct adapter *adapter)
{
	uint32_t ctrl;

	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);

	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
	ctrl |= E1000_CTRL_VME;
	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
}

static void
em_disable_vlans(struct adapter *adapter)
{
	uint32_t ctrl;

	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
	ctrl &= ~E1000_CTRL_VME;
	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
}

/*
 * note: we must call bus_enable_intr() prior to enabling the hardware
 * interrupt and bus_disable_intr() after disabling the hardware interrupt
 * in order to avoid handler execution races from scheduled interrupt
 * threads.
 */
static void
em_enable_intr(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->interface_data.ac_if;

	if ((ifp->if_flags & IFF_POLLING) == 0) {
		lwkt_serialize_handler_enable(ifp->if_serializer);
		E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
	}
}

static void
em_disable_intr(struct adapter *adapter)
{
	/*
	 * The first version of 82542 had an errata where when link was
	 * forced it would stay up even if the cable was disconnected.
	 * Sequence errors were used to detect the disconnect and then the
	 * driver would unforce the link.  This code is in the ISR.  For
	 * this to work correctly the Sequence error interrupt had to be
	 * enabled all the time.
	 */
	if (adapter->hw.mac_type == em_82542_rev2_0) {
		E1000_WRITE_REG(&adapter->hw, IMC,
				(0xffffffff & ~E1000_IMC_RXSEQ));
	} else {
		E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
	}

	lwkt_serialize_handler_disable(adapter->interface_data.ac_if.if_serializer);
}

static int
em_is_valid_ether_addr(uint8_t *addr)
{
	static const char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
		return (FALSE);

	return (TRUE);
}
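
/*
 * Example (informational only): em_is_valid_ether_addr() rejects any address
 * whose first octet has the group bit set, e.g. 01:00:5e:00:00:01 or
 * ff:ff:ff:ff:ff:ff (0x01 and 0xff both have bit 0 set), and the all-zero
 * address 00:00:00:00:00:00.  A typical unicast MAC such as
 * 00:1b:21:xx:xx:xx passes the check.
 */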

void
em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
{
	pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
}

void
em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
{
	*value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
}

void
em_pci_set_mwi(struct em_hw *hw)
{
	pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
			 (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
}

void
em_pci_clear_mwi(struct em_hw *hw)
{
	pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
			 (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
}

uint32_t
em_io_read(struct em_hw *hw, unsigned long port)
{
	struct em_osdep *io = hw->back;

	return bus_space_read_4(io->io_bus_space_tag,
				io->io_bus_space_handle, port);
}

void
em_io_write(struct em_hw *hw, unsigned long port, uint32_t value)
{
	struct em_osdep *io = hw->back;

	bus_space_write_4(io->io_bus_space_tag,
			  io->io_bus_space_handle, port, value);
}

/*
 * We may eventually really do this, but it's unnecessary
 * for now so we just return unsupported.
 */
int32_t
em_read_pcie_cap_reg(struct em_hw *hw, uint32_t reg, uint16_t *value)
{
	return (0);
}

/*********************************************************************
 * 82544 Coexistence issue workaround.
 *    There are 2 issues.
 *      1. Transmit Hang issue.
 *    To detect this issue, following equation can be used...
 *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
 *          If SUM[3:0] is in between 1 to 4, we will have this issue.
 *
 *      2. DAC issue.
 *    To detect this issue, following equation can be used...
 *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
 *          If SUM[3:0] is in between 9 to c, we will have this issue.
 *
 *    WORKAROUND:
 *          Make sure we do not have ending address as 1,2,3,4(Hang) or
 *          9,a,b,c (DAC)
 *
 *************************************************************************/
static uint32_t
em_fill_descriptors(bus_addr_t address, uint32_t length, PDESC_ARRAY desc_array)
{
	/* Since issue is sensitive to length and address. */
	/* Let us first check the address... */
	uint32_t safe_terminator;

	if (length <= 4) {
		desc_array->descriptor[0].address = address;
		desc_array->descriptor[0].length = length;
		desc_array->elements = 1;
		return (desc_array->elements);
	}
	safe_terminator =
	    (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
	/* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
	if (safe_terminator == 0 ||
	    (safe_terminator > 4 && safe_terminator < 9) ||
	    (safe_terminator > 0xC && safe_terminator <= 0xF)) {
		desc_array->descriptor[0].address = address;
		desc_array->descriptor[0].length = length;
		desc_array->elements = 1;
		return (desc_array->elements);
	}

	desc_array->descriptor[0].address = address;
	desc_array->descriptor[0].length = length - 4;
	desc_array->descriptor[1].address = address + (length - 4);
	desc_array->descriptor[1].length = 4;
	desc_array->elements = 2;
	return (desc_array->elements);
}
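
/*
 * Worked example (illustrative only): for a segment whose bus address ends
 * in 0x5 and whose length is 0x32, ADDR[2:0] = 0x5 and SIZE[3:0] = 0x2, so
 * SUM[3:0] = 0x7, which is outside both the 1-4 and 9-C danger ranges and
 * the segment passes through as a single descriptor.  For an address ending
 * in 0x6 with length 0x3C, SUM[3:0] = (0x6 + 0xC) & 0xF = 0x2, which falls
 * in the hang range, so the segment is split: descriptor 0 carries
 * length - 4 bytes and descriptor 1 carries the final 4 bytes.
 */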

/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 **********************************************************************/
static void
em_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp;

	if (adapter->hw.media_type == em_media_type_copper ||
	    (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);

	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);

	if (adapter->hw.mac_type >= em_82543) {
		adapter->stats.algnerrc +=
			E1000_READ_REG(&adapter->hw, ALGNERRC);
		adapter->stats.rxerrc +=
			E1000_READ_REG(&adapter->hw, RXERRC);
		adapter->stats.tncrs +=
			E1000_READ_REG(&adapter->hw, TNCRS);
		adapter->stats.cexterr +=
			E1000_READ_REG(&adapter->hw, CEXTERR);
		adapter->stats.tsctc +=
			E1000_READ_REG(&adapter->hw, TSCTC);
		adapter->stats.tsctfc +=
			E1000_READ_REG(&adapter->hw, TSCTFC);
	}
	ifp = &adapter->interface_data.ac_if;

	/* Fill out the OS statistics structure */
	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors =
		adapter->dropped_pkts +
		adapter->stats.rxerrc +
		adapter->stats.crcerrs +
		adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.mpc + adapter->stats.cexterr +
		adapter->rx_overruns;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
			  adapter->watchdog_timeouts;
}

/**********************************************************************
 *
 *  This routine is called only when em_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
em_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	uint8_t *hw_addr = adapter->hw.hw_addr;

	device_printf(dev, "Adapter hardware address = %p\n", hw_addr);
	device_printf(dev, "CTRL = 0x%x RCTL = 0x%x\n",
		      E1000_READ_REG(&adapter->hw, CTRL),
		      E1000_READ_REG(&adapter->hw, RCTL));
	device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk\n",
		      ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),
		      (E1000_READ_REG(&adapter->hw, PBA) & 0xffff));
	device_printf(dev, "Flow control watermarks high = %d low = %d\n",
		      adapter->hw.fc_high_water, adapter->hw.fc_low_water);
	device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
		      E1000_READ_REG(&adapter->hw, TIDV),
		      E1000_READ_REG(&adapter->hw, TADV));
	device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
		      E1000_READ_REG(&adapter->hw, RDTR),
		      E1000_READ_REG(&adapter->hw, RADV));
	device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
		      (long long)adapter->tx_fifo_wrk_cnt,
		      (long long)adapter->tx_fifo_reset_cnt);
	device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
		      E1000_READ_REG(&adapter->hw, TDH),
		      E1000_READ_REG(&adapter->hw, TDT));
	device_printf(dev, "Num Tx descriptors avail = %d\n",
		      adapter->num_tx_desc_avail);
	device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
		      adapter->no_tx_desc_avail1);
	device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
		      adapter->no_tx_desc_avail2);
	device_printf(dev, "Std mbuf failed = %ld\n",
		      adapter->mbuf_alloc_failed);
	device_printf(dev, "Std mbuf cluster failed = %ld\n",
		      adapter->mbuf_cluster_failed);
	device_printf(dev, "Driver dropped packets = %ld\n",
		      adapter->dropped_pkts);
}

static void
em_print_hw_stats(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	device_printf(dev, "Excessive collisions = %lld\n",
		      (long long)adapter->stats.ecol);
	device_printf(dev, "Symbol errors = %lld\n",
		      (long long)adapter->stats.symerrs);
	device_printf(dev, "Sequence errors = %lld\n",
		      (long long)adapter->stats.sec);
	device_printf(dev, "Defer count = %lld\n",
		      (long long)adapter->stats.dc);

	device_printf(dev, "Missed Packets = %lld\n",
		      (long long)adapter->stats.mpc);
	device_printf(dev, "Receive No Buffers = %lld\n",
		      (long long)adapter->stats.rnbc);
	/* RLEC is inaccurate on some hardware, calculate our own. */
	device_printf(dev, "Receive Length errors = %lld\n",
		      (long long)adapter->stats.roc +
		      (long long)adapter->stats.ruc);
	device_printf(dev, "Receive errors = %lld\n",
		      (long long)adapter->stats.rxerrc);
	device_printf(dev, "Crc errors = %lld\n",
		      (long long)adapter->stats.crcerrs);
	device_printf(dev, "Alignment errors = %lld\n",
		      (long long)adapter->stats.algnerrc);
	device_printf(dev, "Carrier extension errors = %lld\n",
		      (long long)adapter->stats.cexterr);
	device_printf(dev, "RX overruns = %lu\n", adapter->rx_overruns);
	device_printf(dev, "Watchdog timeouts = %lu\n",
		      adapter->watchdog_timeouts);

	device_printf(dev, "XON Rcvd = %lld\n",
		      (long long)adapter->stats.xonrxc);
	device_printf(dev, "XON Xmtd = %lld\n",
		      (long long)adapter->stats.xontxc);
	device_printf(dev, "XOFF Rcvd = %lld\n",
		      (long long)adapter->stats.xoffrxc);
	device_printf(dev, "XOFF Xmtd = %lld\n",
		      (long long)adapter->stats.xofftxc);

	device_printf(dev, "Good Packets Rcvd = %lld\n",
		      (long long)adapter->stats.gprc);
	device_printf(dev, "Good Packets Xmtd = %lld\n",
		      (long long)adapter->stats.gptc);
}

static int
em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct adapter *adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *)arg1;
		em_print_debug_info(adapter);
	}

	return (error);
}

static int
em_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct adapter *adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *)arg1;
		em_print_hw_stats(adapter);
	}

	return (error);
}

static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	uint32_t regval;
	int error;
	int usecs;
	int ticks;

	info = (struct em_int_delay_info *)arg1;
	adapter = info->adapter;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
		return (EINVAL);
	info->value = usecs;
	ticks = E1000_USECS_TO_TICKS(usecs);

	lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
	case E1000_82542_RDTR:
		regval |= E1000_RDT_FPDB;
		break;
	case E1000_TIDV:
	case E1000_82542_TIDV:
		if (ticks == 0) {
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else {
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		}
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
	return (0);
}

static void
em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
			const char *description,
			struct em_int_delay_info *info,
			int offset, int value)
{
	info->adapter = adapter;
	info->offset = offset;
	info->value = value;
	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
			SYSCTL_CHILDREN(adapter->sysctl_tree),
			OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
			info, 0, em_sysctl_int_delay, "I", description);
}

static int
em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (void *)arg1;
	int error;
	int throttle;

	throttle = em_int_throttle_ceil;
	error = sysctl_handle_int(oidp, &throttle, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (throttle < 0 || throttle > 1000000000 / 256)
		return (EINVAL);

	if (throttle) {
		/*
		 * Set the interrupt throttling rate in 256ns increments,
		 * recalculate sysctl value assignment to get exact frequency.
		 */
		throttle = 1000000000 / 256 / throttle;
		lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
		em_int_throttle_ceil = 1000000000 / 256 / throttle;
		E1000_WRITE_REG(&adapter->hw, ITR, throttle);
		lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
	} else {
		lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
		em_int_throttle_ceil = 0;
		E1000_WRITE_REG(&adapter->hw, ITR, 0);
		lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
	}
	device_printf(adapter->dev, "Interrupt moderation set to %d/sec\n",
		      em_int_throttle_ceil);
	return (0);
}
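
/*
 * Worked example (illustrative only): the ITR register counts in 256 ns
 * units, so a requested ceiling of 10000 interrupts/sec is programmed as
 * 1000000000 / 256 / 10000 = 390, and converting that register value back
 * gives 1000000000 / 256 / 390 = 10016 interrupts/sec, which is the exact
 * figure the sysctl reports after the round trip above.
 */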