/*
 * This file is provided under a CDDLv1 license.  When using or
 * redistributing this file, you may do so under this license.
 * In redistributing this file this license must be included
 * and no other modification of this header file is permitted.
 *
 * CDDL LICENSE SUMMARY
 *
 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
 *
 * The contents of this file are subject to the terms of Version
 * 1.0 of the Common Development and Distribution License (the "License").
 *
 * You should have received a copy of the License with this software.
 * You can obtain a copy of the License at
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 */

/*
 * **********************************************************************
 *
 * Module Name:
 *	e1000g_main.c
 *
 * Abstract:
 *	This file contains the interface routines for the Solaris OS.
 *	It has all DDI entry point routines and GLD entry point routines.
 *
 *	This file also contains routines that take care of initialization,
 *	the uninit routine and the interrupt routine.
 *
 * **********************************************************************
 */
#include <sys/dlpi.h>
#include <sys/mac.h>
#include "e1000g_sw.h"
#include "e1000g_debug.h"

static char ident[] = "Intel PRO/1000 Ethernet";
/* LINTED E_STATIC_UNUSED */
static char e1000g_version[] = "Driver Ver. 5.3.24";
/* Prototypes for DDI entry points */
59 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
60 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
61 static int e1000g_quiesce(dev_info_t *);
/* init and intr routine prototypes */
66 static int e1000g_resume(dev_info_t *);
67 static int e1000g_suspend(dev_info_t *);
68 static uint_t e1000g_intr_pciexpress(caddr_t);
69 static uint_t e1000g_intr(caddr_t);
70 static void e1000g_intr_work(struct e1000g *, uint32_t);
71 #pragma inline(e1000g_intr_work)
72 static int e1000g_init(struct e1000g *);
73 static int e1000g_start(struct e1000g *, boolean_t);
74 static void e1000g_stop(struct e1000g *, boolean_t);
75 static int e1000g_m_start(void *);
76 static void e1000g_m_stop(void *);
77 static int e1000g_m_promisc(void *, boolean_t);
78 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
79 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
80 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
81 static int e1000g_m_setprop(void *, const char *, mac_prop_id_t,
82 uint_t, const void *);
83 static int e1000g_m_getprop(void *, const char *, mac_prop_id_t,
84 uint_t, void *);
85 static void e1000g_m_propinfo(void *, const char *, mac_prop_id_t,
86 mac_prop_info_handle_t);
87 static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t,
88 const void *);
89 static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t, void *);
90 static void e1000g_init_locks(struct e1000g *);
91 static void e1000g_destroy_locks(struct e1000g *);
92 static int e1000g_identify_hardware(struct e1000g *);
93 static int e1000g_regs_map(struct e1000g *);
94 static int e1000g_set_driver_params(struct e1000g *);
95 static void e1000g_set_bufsize(struct e1000g *);
96 static int e1000g_register_mac(struct e1000g *);
97 static boolean_t e1000g_rx_drain(struct e1000g *);
98 static boolean_t e1000g_tx_drain(struct e1000g *);
99 static void e1000g_init_unicst(struct e1000g *);
100 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int);
101 static int e1000g_alloc_rx_data(struct e1000g *);
102 static void e1000g_release_multicast(struct e1000g *);
103 static void e1000g_pch_limits(struct e1000g *);
104 static uint32_t e1000g_mtu2maxframe(uint32_t);
/* Local routines */
109 static boolean_t e1000g_reset_adapter(struct e1000g *);
110 static void e1000g_tx_clean(struct e1000g *);
111 static void e1000g_rx_clean(struct e1000g *);
112 static void e1000g_link_timer(void *);
113 static void e1000g_local_timer(void *);
114 static boolean_t e1000g_link_check(struct e1000g *);
115 static boolean_t e1000g_stall_check(struct e1000g *);
116 static void e1000g_smartspeed(struct e1000g *);
117 static void e1000g_get_conf(struct e1000g *);
118 static boolean_t e1000g_get_prop(struct e1000g *, char *, int, int, int,
119 int *);
120 static void enable_watchdog_timer(struct e1000g *);
121 static void disable_watchdog_timer(struct e1000g *);
122 static void start_watchdog_timer(struct e1000g *);
123 static void restart_watchdog_timer(struct e1000g *);
124 static void stop_watchdog_timer(struct e1000g *);
125 static void stop_link_timer(struct e1000g *);
126 static void stop_82547_timer(e1000g_tx_ring_t *);
127 static void e1000g_force_speed_duplex(struct e1000g *);
128 static void e1000g_setup_max_mtu(struct e1000g *);
129 static void e1000g_get_max_frame_size(struct e1000g *);
130 static boolean_t is_valid_mac_addr(uint8_t *);
131 static void e1000g_unattach(dev_info_t *, struct e1000g *);
132 static int e1000g_get_bar_info(dev_info_t *, int, bar_info_t *);
133 #ifdef E1000G_DEBUG
134 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *);
135 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *);
136 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *);
137 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *);
138 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *,
139 struct iocblk *, mblk_t *);
140 #endif
141 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *,
142 struct iocblk *, mblk_t *);
143 static boolean_t e1000g_check_loopback_support(struct e1000_hw *);
144 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t);
145 static void e1000g_set_internal_loopback(struct e1000g *);
146 static void e1000g_set_external_loopback_1000(struct e1000g *);
147 static void e1000g_set_external_loopback_100(struct e1000g *);
148 static void e1000g_set_external_loopback_10(struct e1000g *);
149 static int e1000g_add_intrs(struct e1000g *);
150 static int e1000g_intr_add(struct e1000g *, int);
151 static int e1000g_rem_intrs(struct e1000g *);
152 static int e1000g_enable_intrs(struct e1000g *);
153 static int e1000g_disable_intrs(struct e1000g *);
154 static boolean_t e1000g_link_up(struct e1000g *);
155 static void e1000g_get_phy_state(struct e1000g *);
156 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
157 const void *impl_data);
158 static void e1000g_fm_init(struct e1000g *Adapter);
159 static void e1000g_fm_fini(struct e1000g *Adapter);
160 static void e1000g_param_sync(struct e1000g *);
161 static void e1000g_get_driver_control(struct e1000_hw *);
162 static void e1000g_release_driver_control(struct e1000_hw *);
163 static void e1000g_restore_promisc(struct e1000g *Adapter);
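/*
 * Driver-private MAC properties. The leading underscore marks each name
 * as a private (driver-specific) link property rather than a standard
 * MAC property.
 */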
165 char *e1000g_priv_props[] = {
166 "_tx_bcopy_threshold",
167 "_tx_interrupt_enable",
168 "_tx_intr_delay",
169 "_tx_intr_abs_delay",
170 "_rx_bcopy_threshold",
171 "_max_num_rcv_packets",
172 "_rx_intr_delay",
173 "_rx_intr_abs_delay",
174 "_intr_throttling_rate",
175 "_intr_adaptive",
176 "_adv_pause_cap",
177 "_adv_asym_pause_cap",
	NULL
};
181 static struct cb_ops cb_ws_ops = {
182 nulldev, /* cb_open */
183 nulldev, /* cb_close */
184 nodev, /* cb_strategy */
185 nodev, /* cb_print */
186 nodev, /* cb_dump */
187 nodev, /* cb_read */
188 nodev, /* cb_write */
189 nodev, /* cb_ioctl */
190 nodev, /* cb_devmap */
191 nodev, /* cb_mmap */
192 nodev, /* cb_segmap */
193 nochpoll, /* cb_chpoll */
194 ddi_prop_op, /* cb_prop_op */
195 NULL, /* cb_stream */
196 D_MP | D_HOTPLUG, /* cb_flag */
197 CB_REV, /* cb_rev */
198 nodev, /* cb_aread */
	nodev			/* cb_awrite */
};
202 static struct dev_ops ws_ops = {
203 DEVO_REV, /* devo_rev */
204 0, /* devo_refcnt */
205 NULL, /* devo_getinfo */
206 nulldev, /* devo_identify */
207 nulldev, /* devo_probe */
208 e1000g_attach, /* devo_attach */
209 e1000g_detach, /* devo_detach */
210 nodev, /* devo_reset */
211 &cb_ws_ops, /* devo_cb_ops */
212 NULL, /* devo_bus_ops */
213 ddi_power, /* devo_power */
	e1000g_quiesce		/* devo_quiesce */
};
217 static struct modldrv modldrv = {
218 &mod_driverops, /* Type of module. This one is a driver */
	ident,				/* Description string */
	&ws_ops,			/* driver ops */
};
223 static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
227 /* Access attributes for register mapping */
228 static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
229 DDI_DEVICE_ATTR_V1,
230 DDI_STRUCTURE_LE_ACC,
231 DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};
235 #define E1000G_M_CALLBACK_FLAGS \
236 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
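/*
 * The MC_* bits above tell the MAC layer which optional mac_callbacks_t
 * entry points (ioctl, getcapab and the property routines) this driver
 * implements.
 */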
238 static mac_callbacks_t e1000g_m_callbacks = {
239 E1000G_M_CALLBACK_FLAGS,
240 e1000g_m_stat,
241 e1000g_m_start,
242 e1000g_m_stop,
243 e1000g_m_promisc,
244 e1000g_m_multicst,
245 NULL,
246 e1000g_m_tx,
247 NULL,
248 e1000g_m_ioctl,
249 e1000g_m_getcapab,
250 NULL,
251 NULL,
252 e1000g_m_setprop,
253 e1000g_m_getprop,
	e1000g_m_propinfo
};
/* Global variables */
260 uint32_t e1000g_jumbo_mtu = MAXIMUM_MTU_9K;
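/*
 * e1000g_mblks_pending tracks receive buffers still held by the upper
 * layers; _fini() refuses to unload the module (returns EBUSY) while it
 * is non-zero.
 */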
261 uint32_t e1000g_mblks_pending = 0;
/*
 * Workaround for Dynamic Reconfiguration support, for x86 platform only.
 * Here we maintain a private dev_info list if e1000g_force_detach is
 * enabled. If we force the driver to detach while there are still some
 * rx buffers retained in the upper layer, we have to keep a copy of the
 * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
 * structure will be freed after the driver is detached. However when we
 * finally free those rx buffers released by the upper layer, we need to
 * refer to the dev_info to free the dma buffers. So we save a copy of
 * the dev_info for this purpose. On x86 platform, we assume this copy
 * of dev_info is always valid, but on SPARC platform, it could be invalid
 * after the system board level DR operation. For this reason, the global
 * variable e1000g_force_detach must be B_FALSE on SPARC platform.
 */
276 boolean_t e1000g_force_detach = B_TRUE;
277 private_devi_list_t *e1000g_private_devi_list = NULL;
/*
 * The mutex e1000g_rx_detach_lock is defined to protect the processing of
 * the private dev_info list, and to serialize the processing of rx buffer
 * freeing and rx buffer recycling.
 */
284 kmutex_t e1000g_rx_detach_lock;
/*
 * The rwlock e1000g_dma_type_lock is defined to protect the global flag
 * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
 * If there are many e1000g instances, the system may run out of DVMA
 * resources during the initialization of the instances, then the flag will
 * be changed to "USE_DMA". Because different e1000g instances are initialized
 * in parallel, we need to use this lock to protect the flag.
 */
293 krwlock_t e1000g_dma_type_lock;
/*
 * The 82546 chipset is a dual-port device, and both ports share one eeprom.
 * Based on information from Intel, the 82546 chipset has a hardware
 * problem: when one port is being reset and the other port is trying to
 * access the eeprom, it could cause a system hang or panic. To work around
 * this hardware problem, we use a global mutex to prevent such operations
 * from happening simultaneously on different instances. This workaround is
 * applied to all the devices supported by this driver.
 */
304 kmutex_t e1000g_nvm_lock;
/*
 * Loadable module configuration entry points for the driver
 */

/*
 * _init - module initialization
 */
int
_init(void)
{
316 int status;
318 mac_init_ops(&ws_ops, WSNAME);
319 status = mod_install(&modlinkage);
320 if (status != DDI_SUCCESS)
321 mac_fini_ops(&ws_ops);
322 else {
323 mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL);
324 rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL);
		mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL);
	}

	return (status);
}
/*
 * _fini - module finalization
 */
int
_fini(void)
{
337 int status;
339 if (e1000g_mblks_pending != 0)
340 return (EBUSY);
342 status = mod_remove(&modlinkage);
343 if (status == DDI_SUCCESS) {
344 mac_fini_ops(&ws_ops);
346 if (e1000g_force_detach) {
347 private_devi_list_t *devi_node;
349 mutex_enter(&e1000g_rx_detach_lock);
350 while (e1000g_private_devi_list != NULL) {
351 devi_node = e1000g_private_devi_list;
352 e1000g_private_devi_list =
353 e1000g_private_devi_list->next;
355 kmem_free(devi_node->priv_dip,
356 sizeof (struct dev_info));
357 kmem_free(devi_node,
				    sizeof (private_devi_list_t));
			}
			mutex_exit(&e1000g_rx_detach_lock);
		}

		mutex_destroy(&e1000g_rx_detach_lock);
		rw_destroy(&e1000g_dma_type_lock);
		mutex_destroy(&e1000g_nvm_lock);
	}

	return (status);
}
/*
 * _info - module information
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
/*
 * e1000g_attach - driver attach
 *
 * This function is the device-specific initialization entry
 * point. This entry point is required and must be written.
 * The DDI_ATTACH command must be provided in the attach entry
 * point. When attach() is called with cmd set to DDI_ATTACH,
 * all normal kernel services (such as kmem_alloc(9F)) are
 * available for use by the driver.
 *
 * The attach() function will be called once for each instance
 * of the device on the system with cmd set to DDI_ATTACH.
 * Until attach() succeeds, the only driver entry points which
 * may be called are open(9E) and getinfo(9E).
 */
static int
e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
398 struct e1000g *Adapter;
399 struct e1000_hw *hw;
400 struct e1000g_osdep *osdep;
401 int instance;
403 switch (cmd) {
404 default:
405 e1000g_log(NULL, CE_WARN,
		    "Unsupported command sent to e1000g_attach... ");
407 return (DDI_FAILURE);
409 case DDI_RESUME:
410 return (e1000g_resume(devinfo));
412 case DDI_ATTACH:
413 break;
417 * get device instance number
419 instance = ddi_get_instance(devinfo);
422 * Allocate soft data structure
424 Adapter = kmem_zalloc(sizeof (*Adapter), KM_SLEEP);
426 Adapter->dip = devinfo;
427 Adapter->instance = instance;
428 Adapter->tx_ring->adapter = Adapter;
429 Adapter->rx_ring->adapter = Adapter;
431 hw = &Adapter->shared;
432 osdep = &Adapter->osdep;
433 hw->back = osdep;
434 osdep->adapter = Adapter;
436 ddi_set_driver_private(devinfo, (caddr_t)Adapter);
439 * Initialize for fma support
441 (void) e1000g_get_prop(Adapter, "fm-capable",
442 0, 0x0f,
443 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
444 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE,
445 &Adapter->fm_capabilities);
446 e1000g_fm_init(Adapter);
447 Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT;
450 * PCI Configure
452 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
453 e1000g_log(Adapter, CE_WARN, "PCI configuration failed");
454 goto attach_fail;
456 Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
459 * Setup hardware
461 if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) {
462 e1000g_log(Adapter, CE_WARN, "Identify hardware failed");
463 goto attach_fail;
467 * Map in the device registers.
469 if (e1000g_regs_map(Adapter) != DDI_SUCCESS) {
470 e1000g_log(Adapter, CE_WARN, "Mapping registers failed");
471 goto attach_fail;
473 Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
476 * Initialize driver parameters
478 if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) {
479 goto attach_fail;
481 Adapter->attach_progress |= ATTACH_PROGRESS_SETUP;
483 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
484 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
485 goto attach_fail;
489 * Disable ULP support
491 (void) e1000_disable_ulp_lpt_lp(hw, TRUE);
494 * Initialize interrupts
496 if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) {
497 e1000g_log(Adapter, CE_WARN, "Add interrupts failed");
498 goto attach_fail;
500 Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
	/*
	 * Initialize mutexes for this device.
	 * Do this before enabling the interrupt handler and
	 * registering the softint, to avoid the condition where the
	 * interrupt handler could try to use an uninitialized mutex.
	 */
508 e1000g_init_locks(Adapter);
509 Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS;
512 * Initialize Driver Counters
514 if (e1000g_init_stats(Adapter) != DDI_SUCCESS) {
515 e1000g_log(Adapter, CE_WARN, "Init stats failed");
516 goto attach_fail;
518 Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS;
521 * Initialize chip hardware and software structures
523 rw_enter(&Adapter->chip_lock, RW_WRITER);
524 if (e1000g_init(Adapter) != DDI_SUCCESS) {
525 rw_exit(&Adapter->chip_lock);
526 e1000g_log(Adapter, CE_WARN, "Adapter initialization failed");
527 goto attach_fail;
529 rw_exit(&Adapter->chip_lock);
530 Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
533 * Register the driver to the MAC
535 if (e1000g_register_mac(Adapter) != DDI_SUCCESS) {
536 e1000g_log(Adapter, CE_WARN, "Register MAC failed");
537 goto attach_fail;
539 Adapter->attach_progress |= ATTACH_PROGRESS_MAC;
542 * Now that mutex locks are initialized, and the chip is also
543 * initialized, enable interrupts.
545 if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) {
546 e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed");
547 goto attach_fail;
549 Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
552 * If e1000g_force_detach is enabled, in global private dip list,
553 * we will create a new entry, which maintains the priv_dip for DR
554 * supports after driver detached.
556 if (e1000g_force_detach) {
557 private_devi_list_t *devi_node;
559 Adapter->priv_dip =
560 kmem_zalloc(sizeof (struct dev_info), KM_SLEEP);
561 bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip),
562 sizeof (struct dev_info));
564 devi_node =
565 kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP);
567 mutex_enter(&e1000g_rx_detach_lock);
568 devi_node->priv_dip = Adapter->priv_dip;
569 devi_node->flag = E1000G_PRIV_DEVI_ATTACH;
570 devi_node->pending_rx_count = 0;
572 Adapter->priv_devi_node = devi_node;
574 if (e1000g_private_devi_list == NULL) {
575 devi_node->prev = NULL;
576 devi_node->next = NULL;
577 e1000g_private_devi_list = devi_node;
578 } else {
579 devi_node->prev = NULL;
580 devi_node->next = e1000g_private_devi_list;
581 e1000g_private_devi_list->prev = devi_node;
582 e1000g_private_devi_list = devi_node;
584 mutex_exit(&e1000g_rx_detach_lock);
587 Adapter->e1000g_state = E1000G_INITIALIZED;
588 return (DDI_SUCCESS);
590 attach_fail:
591 e1000g_unattach(devinfo, Adapter);
592 return (DDI_FAILURE);
595 static int
596 e1000g_register_mac(struct e1000g *Adapter)
598 struct e1000_hw *hw = &Adapter->shared;
599 mac_register_t *mac;
600 int err;
602 if ((mac = mac_alloc(MAC_VERSION)) == NULL)
603 return (DDI_FAILURE);
605 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
606 mac->m_driver = Adapter;
607 mac->m_dip = Adapter->dip;
608 mac->m_src_addr = hw->mac.addr;
609 mac->m_callbacks = &e1000g_m_callbacks;
610 mac->m_min_sdu = 0;
611 mac->m_max_sdu = Adapter->default_mtu;
612 mac->m_margin = VLAN_TAGSZ;
613 mac->m_priv_props = e1000g_priv_props;
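	/* Advertise level-1 MAC virtualization support to the framework. */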
614 mac->m_v12n = MAC_VIRT_LEVEL1;
616 err = mac_register(mac, &Adapter->mh);
617 mac_free(mac);
619 return (err == 0 ? DDI_SUCCESS : DDI_FAILURE);
622 static int
623 e1000g_identify_hardware(struct e1000g *Adapter)
625 struct e1000_hw *hw = &Adapter->shared;
626 struct e1000g_osdep *osdep = &Adapter->osdep;
628 /* Get the device id */
629 hw->vendor_id =
630 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
631 hw->device_id =
632 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
633 hw->revision_id =
634 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
635 hw->subsystem_device_id =
636 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
637 hw->subsystem_vendor_id =
638 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
640 if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
641 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
642 "MAC type could not be set properly.");
643 return (DDI_FAILURE);
646 return (DDI_SUCCESS);
649 static int
650 e1000g_regs_map(struct e1000g *Adapter)
652 dev_info_t *devinfo = Adapter->dip;
653 struct e1000_hw *hw = &Adapter->shared;
654 struct e1000g_osdep *osdep = &Adapter->osdep;
655 off_t mem_size;
656 bar_info_t bar_info;
657 int offset, rnumber;
659 rnumber = ADAPTER_REG_SET;
660 /* Get size of adapter register memory */
661 if (ddi_dev_regsize(devinfo, rnumber, &mem_size) !=
662 DDI_SUCCESS) {
663 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
664 "ddi_dev_regsize for registers failed");
665 return (DDI_FAILURE);
668 /* Map adapter register memory */
669 if ((ddi_regs_map_setup(devinfo, rnumber,
670 (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr,
671 &osdep->reg_handle)) != DDI_SUCCESS) {
672 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
673 "ddi_regs_map_setup for registers failed");
674 goto regs_map_fail;
677 /* ICH needs to map flash memory */
678 switch (hw->mac.type) {
679 case e1000_ich8lan:
680 case e1000_ich9lan:
681 case e1000_ich10lan:
682 case e1000_pchlan:
683 case e1000_pch2lan:
684 case e1000_pch_lpt:
685 rnumber = ICH_FLASH_REG_SET;
687 /* get flash size */
688 if (ddi_dev_regsize(devinfo, rnumber,
689 &mem_size) != DDI_SUCCESS) {
690 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
691 "ddi_dev_regsize for ICH flash failed");
692 goto regs_map_fail;
695 /* map flash in */
696 if (ddi_regs_map_setup(devinfo, rnumber,
697 (caddr_t *)&hw->flash_address, 0,
698 mem_size, &e1000g_regs_acc_attr,
699 &osdep->ich_flash_handle) != DDI_SUCCESS) {
700 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
701 "ddi_regs_map_setup for ICH flash failed");
702 goto regs_map_fail;
704 break;
705 case e1000_pch_spt:
706 case e1000_pch_cnp:
		/*
		 * On the SPT, the device flash is actually in BAR0, not a
		 * separate BAR. Therefore we end up setting the
		 * ich_flash_handle to be the same as the register handle.
		 * We mark the same to reduce the confusion in the other
		 * functions and macros. Though this does make the set up and
		 * tear-down path slightly more complicated.
		 */
715 osdep->ich_flash_handle = osdep->reg_handle;
716 hw->flash_address = hw->hw_addr;
717 default:
718 break;
721 /* map io space */
722 switch (hw->mac.type) {
723 case e1000_82544:
724 case e1000_82540:
725 case e1000_82545:
726 case e1000_82546:
727 case e1000_82541:
728 case e1000_82541_rev_2:
729 /* find the IO bar */
730 rnumber = -1;
731 for (offset = PCI_CONF_BASE1;
732 offset <= PCI_CONF_BASE5; offset += 4) {
733 if (e1000g_get_bar_info(devinfo, offset, &bar_info)
734 != DDI_SUCCESS)
735 continue;
736 if (bar_info.type == E1000G_BAR_IO) {
737 rnumber = bar_info.rnumber;
738 break;
742 if (rnumber < 0) {
743 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
744 "No io space is found");
745 goto regs_map_fail;
748 /* get io space size */
749 if (ddi_dev_regsize(devinfo, rnumber,
750 &mem_size) != DDI_SUCCESS) {
751 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
752 "ddi_dev_regsize for io space failed");
753 goto regs_map_fail;
756 /* map io space */
757 if ((ddi_regs_map_setup(devinfo, rnumber,
758 (caddr_t *)&hw->io_base, 0, mem_size,
759 &e1000g_regs_acc_attr,
760 &osdep->io_reg_handle)) != DDI_SUCCESS) {
761 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
762 "ddi_regs_map_setup for io space failed");
763 goto regs_map_fail;
765 break;
766 default:
767 hw->io_base = 0;
768 break;
771 return (DDI_SUCCESS);
773 regs_map_fail:
774 if (osdep->reg_handle != NULL)
775 ddi_regs_map_free(&osdep->reg_handle);
776 if (osdep->ich_flash_handle != NULL && hw->mac.type < e1000_pch_spt)
777 ddi_regs_map_free(&osdep->ich_flash_handle);
778 return (DDI_FAILURE);
781 static int
782 e1000g_set_driver_params(struct e1000g *Adapter)
784 struct e1000_hw *hw;
786 hw = &Adapter->shared;
788 /* Set MAC type and initialize hardware functions */
789 if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
790 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
791 "Could not setup hardware functions");
792 return (DDI_FAILURE);
795 /* Get bus information */
796 if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
797 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
798 "Could not get bus information");
799 return (DDI_FAILURE);
802 e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word);
804 hw->mac.autoneg_failed = B_TRUE;
806 /* Set the autoneg_wait_to_complete flag to B_FALSE */
807 hw->phy.autoneg_wait_to_complete = B_FALSE;
809 /* Adaptive IFS related changes */
810 hw->mac.adaptive_ifs = B_TRUE;
812 /* Enable phy init script for IGP phy of 82541/82547 */
813 if ((hw->mac.type == e1000_82547) ||
814 (hw->mac.type == e1000_82541) ||
815 (hw->mac.type == e1000_82547_rev_2) ||
816 (hw->mac.type == e1000_82541_rev_2))
817 e1000_init_script_state_82541(hw, B_TRUE);
819 /* Enable the TTL workaround for 82541/82547 */
820 e1000_set_ttl_workaround_state_82541(hw, B_TRUE);
822 Adapter->strip_crc = B_FALSE;
824 /* setup the maximum MTU size of the chip */
825 e1000g_setup_max_mtu(Adapter);
827 /* Get speed/duplex settings in conf file */
828 hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
829 hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
830 e1000g_force_speed_duplex(Adapter);
832 /* Get Jumbo Frames settings in conf file */
833 e1000g_get_max_frame_size(Adapter);
835 /* Get conf file properties */
836 e1000g_get_conf(Adapter);
838 /* enforce PCH limits */
839 e1000g_pch_limits(Adapter);
841 /* Set Rx/Tx buffer size */
842 e1000g_set_bufsize(Adapter);
844 /* Master Latency Timer */
845 Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER;
847 /* copper options */
848 if (hw->phy.media_type == e1000_media_type_copper) {
849 hw->phy.mdix = 0; /* AUTO_ALL_MODES */
850 hw->phy.disable_polarity_correction = B_FALSE;
851 hw->phy.ms_type = e1000_ms_hw_default; /* E1000_MASTER_SLAVE */
854 /* The initial link state should be "unknown" */
855 Adapter->link_state = LINK_STATE_UNKNOWN;
857 /* Initialize rx parameters */
858 Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY;
859 Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY;
861 /* Initialize tx parameters */
862 Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE;
863 Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD;
864 Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY;
865 Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY;
867 /* Initialize rx parameters */
868 Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD;
870 return (DDI_SUCCESS);
873 static void
874 e1000g_setup_max_mtu(struct e1000g *Adapter)
876 struct e1000_mac_info *mac = &Adapter->shared.mac;
877 struct e1000_phy_info *phy = &Adapter->shared.phy;
879 switch (mac->type) {
880 /* types that do not support jumbo frames */
881 case e1000_ich8lan:
882 case e1000_82573:
883 case e1000_82583:
884 Adapter->max_mtu = ETHERMTU;
885 break;
886 /* ich9 supports jumbo frames except on one phy type */
887 case e1000_ich9lan:
888 if (phy->type == e1000_phy_ife)
889 Adapter->max_mtu = ETHERMTU;
890 else
891 Adapter->max_mtu = MAXIMUM_MTU_9K;
892 break;
893 /* pch can do jumbo frames up to 4K */
894 case e1000_pchlan:
895 Adapter->max_mtu = MAXIMUM_MTU_4K;
896 break;
897 /* pch2 can do jumbo frames up to 9K */
898 case e1000_pch2lan:
899 case e1000_pch_lpt:
900 case e1000_pch_spt:
901 case e1000_pch_cnp:
902 Adapter->max_mtu = MAXIMUM_MTU_9K;
903 break;
904 /* types with a special limit */
905 case e1000_82571:
906 case e1000_82572:
907 case e1000_82574:
908 case e1000_80003es2lan:
909 case e1000_ich10lan:
910 if (e1000g_jumbo_mtu >= ETHERMTU &&
911 e1000g_jumbo_mtu <= MAXIMUM_MTU_9K) {
912 Adapter->max_mtu = e1000g_jumbo_mtu;
913 } else {
914 Adapter->max_mtu = MAXIMUM_MTU_9K;
916 break;
917 /* default limit is 16K */
918 default:
919 Adapter->max_mtu = FRAME_SIZE_UPTO_16K -
920 sizeof (struct ether_vlan_header) - ETHERFCSL;
921 break;
925 static void
926 e1000g_set_bufsize(struct e1000g *Adapter)
928 struct e1000_mac_info *mac = &Adapter->shared.mac;
929 uint64_t rx_size;
930 uint64_t tx_size;
932 dev_info_t *devinfo = Adapter->dip;
933 /* Get the system page size */
934 Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1);
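	/*
	 * The smallest valid frame is ETHERMIN (60 bytes of header plus
	 * payload) plus the 4-byte FCS, i.e. 64 bytes on the wire.
	 */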
937 Adapter->min_frame_size = ETHERMIN + ETHERFCSL;
939 if (Adapter->mem_workaround_82546 &&
940 ((mac->type == e1000_82545) ||
941 (mac->type == e1000_82546) ||
942 (mac->type == e1000_82546_rev_3))) {
943 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
944 } else {
945 rx_size = Adapter->max_frame_size;
946 if ((rx_size > FRAME_SIZE_UPTO_2K) &&
947 (rx_size <= FRAME_SIZE_UPTO_4K))
948 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
949 else if ((rx_size > FRAME_SIZE_UPTO_4K) &&
950 (rx_size <= FRAME_SIZE_UPTO_8K))
951 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
952 else if ((rx_size > FRAME_SIZE_UPTO_8K) &&
953 (rx_size <= FRAME_SIZE_UPTO_16K))
954 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
955 else
956 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
958 Adapter->rx_buffer_size += E1000G_IPALIGNROOM;
960 tx_size = Adapter->max_frame_size;
961 if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K))
962 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
963 else if ((tx_size > FRAME_SIZE_UPTO_4K) &&
964 (tx_size <= FRAME_SIZE_UPTO_8K))
965 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
966 else if ((tx_size > FRAME_SIZE_UPTO_8K) &&
967 (tx_size <= FRAME_SIZE_UPTO_16K))
968 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
969 else
970 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;
	/*
	 * Wiseman adapters require receive buffers to be aligned on a
	 * 256-byte boundary. Livengood does not, and forcing the alignment
	 * for all hardware would have performance implications, so it is
	 * applied only to Wiseman (and when jumbo frames are enabled).
	 * The rest of the time normal frames are fine, but there is a
	 * potential risk of losing data if the buffer is not aligned, so
	 * all Wiseman boards get 256-byte aligned buffers.
	 */
983 if (mac->type < e1000_82543)
984 Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
985 else
986 Adapter->rx_buf_align = 1;
990 * e1000g_detach - driver detach
992 * The detach() function is the complement of the attach routine.
993 * If cmd is set to DDI_DETACH, detach() is used to remove the
994 * state associated with a given instance of a device node
995 * prior to the removal of that instance from the system.
997 * The detach() function will be called once for each instance
998 * of the device for which there has been a successful attach()
999 * once there are no longer any opens on the device.
1001 * Interrupts routine are disabled, All memory allocated by this
1002 * driver are freed.
1004 static int
1005 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1007 struct e1000g *Adapter;
1008 boolean_t rx_drain;
1010 switch (cmd) {
1011 default:
1012 return (DDI_FAILURE);
1014 case DDI_SUSPEND:
1015 return (e1000g_suspend(devinfo));
1017 case DDI_DETACH:
1018 break;
1021 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1022 if (Adapter == NULL)
1023 return (DDI_FAILURE);
1025 rx_drain = e1000g_rx_drain(Adapter);
1026 if (!rx_drain && !e1000g_force_detach)
1027 return (DDI_FAILURE);
1029 if (mac_unregister(Adapter->mh) != 0) {
1030 e1000g_log(Adapter, CE_WARN, "Unregister MAC failed");
1031 return (DDI_FAILURE);
1033 Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC;
1035 ASSERT(!(Adapter->e1000g_state & E1000G_STARTED));
1037 if (!e1000g_force_detach && !rx_drain)
1038 return (DDI_FAILURE);
1040 e1000g_unattach(devinfo, Adapter);
1042 return (DDI_SUCCESS);
1046 * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance
1048 void
1049 e1000g_free_priv_devi_node(private_devi_list_t *devi_node)
1051 ASSERT(e1000g_private_devi_list != NULL);
1052 ASSERT(devi_node != NULL);
1054 if (devi_node->prev != NULL)
1055 devi_node->prev->next = devi_node->next;
1056 if (devi_node->next != NULL)
1057 devi_node->next->prev = devi_node->prev;
1058 if (devi_node == e1000g_private_devi_list)
1059 e1000g_private_devi_list = devi_node->next;
1061 kmem_free(devi_node->priv_dip,
1062 sizeof (struct dev_info));
1063 kmem_free(devi_node,
1064 sizeof (private_devi_list_t));
1067 static void
1068 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter)
1070 private_devi_list_t *devi_node;
1071 int result;
1073 if (Adapter->e1000g_blink != NULL) {
1074 ddi_periodic_delete(Adapter->e1000g_blink);
1075 Adapter->e1000g_blink = NULL;
1078 if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1079 (void) e1000g_disable_intrs(Adapter);
1082 if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) {
1083 (void) mac_unregister(Adapter->mh);
1086 if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1087 (void) e1000g_rem_intrs(Adapter);
1090 if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) {
1091 (void) ddi_prop_remove_all(devinfo);
1094 if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) {
1095 kstat_delete((kstat_t *)Adapter->e1000g_ksp);
1098 if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) {
1099 stop_link_timer(Adapter);
1101 mutex_enter(&e1000g_nvm_lock);
1102 result = e1000_reset_hw(&Adapter->shared);
1103 mutex_exit(&e1000g_nvm_lock);
1105 if (result != E1000_SUCCESS) {
1106 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1107 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1111 e1000g_release_multicast(Adapter);
1113 if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
1114 if (Adapter->osdep.reg_handle != NULL)
1115 ddi_regs_map_free(&Adapter->osdep.reg_handle);
1116 if (Adapter->osdep.ich_flash_handle != NULL &&
1117 Adapter->shared.mac.type < e1000_pch_spt)
1118 ddi_regs_map_free(&Adapter->osdep.ich_flash_handle);
1119 if (Adapter->osdep.io_reg_handle != NULL)
1120 ddi_regs_map_free(&Adapter->osdep.io_reg_handle);
1123 if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
1124 if (Adapter->osdep.cfg_handle != NULL)
1125 pci_config_teardown(&Adapter->osdep.cfg_handle);
1128 if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) {
1129 e1000g_destroy_locks(Adapter);
1132 if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) {
1133 e1000g_fm_fini(Adapter);
1136 mutex_enter(&e1000g_rx_detach_lock);
1137 if (e1000g_force_detach && (Adapter->priv_devi_node != NULL)) {
1138 devi_node = Adapter->priv_devi_node;
1139 devi_node->flag |= E1000G_PRIV_DEVI_DETACH;
1141 if (devi_node->pending_rx_count == 0) {
1142 e1000g_free_priv_devi_node(devi_node);
1145 mutex_exit(&e1000g_rx_detach_lock);
1147 kmem_free((caddr_t)Adapter, sizeof (struct e1000g));
1150 * Another hotplug spec requirement,
1151 * run ddi_set_driver_private(devinfo, null);
1153 ddi_set_driver_private(devinfo, NULL);
1157 * Get the BAR type and rnumber for a given PCI BAR offset
1159 static int
1160 e1000g_get_bar_info(dev_info_t *dip, int bar_offset, bar_info_t *bar_info)
1162 pci_regspec_t *regs;
1163 uint_t regs_length;
1164 int type, rnumber, rcount;
1166 ASSERT((bar_offset >= PCI_CONF_BASE0) &&
1167 (bar_offset <= PCI_CONF_BASE5));
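	/*
	 * Each entry of the "reg" property is a pci_regspec_t; the BAR's
	 * configuration-space offset is encoded in the low bits of
	 * pci_phys_hi and is extracted below with PCI_REG_REG_G().
	 */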
	/* Get the DDI "reg" property */
1172 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
1173 DDI_PROP_DONTPASS, "reg", (int **)&regs,
1174 &regs_length) != DDI_PROP_SUCCESS) {
1175 return (DDI_FAILURE);
1178 rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t);
	/* Check the BAR offset */
1182 for (rnumber = 0; rnumber < rcount; ++rnumber) {
1183 if (PCI_REG_REG_G(regs[rnumber].pci_phys_hi) == bar_offset) {
1184 type = regs[rnumber].pci_phys_hi & PCI_ADDR_MASK;
1185 break;
1189 ddi_prop_free(regs);
1191 if (rnumber >= rcount)
1192 return (DDI_FAILURE);
1194 switch (type) {
1195 case PCI_ADDR_CONFIG:
1196 bar_info->type = E1000G_BAR_CONFIG;
1197 break;
1198 case PCI_ADDR_IO:
1199 bar_info->type = E1000G_BAR_IO;
1200 break;
1201 case PCI_ADDR_MEM32:
1202 bar_info->type = E1000G_BAR_MEM32;
1203 break;
1204 case PCI_ADDR_MEM64:
1205 bar_info->type = E1000G_BAR_MEM64;
1206 break;
1207 default:
1208 return (DDI_FAILURE);
1210 bar_info->rnumber = rnumber;
1211 return (DDI_SUCCESS);
1214 static void
1215 e1000g_init_locks(struct e1000g *Adapter)
1217 e1000g_tx_ring_t *tx_ring;
1218 e1000g_rx_ring_t *rx_ring;
1220 rw_init(&Adapter->chip_lock, NULL,
1221 RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1222 mutex_init(&Adapter->link_lock, NULL,
1223 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1224 mutex_init(&Adapter->watchdog_lock, NULL,
1225 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1227 tx_ring = Adapter->tx_ring;
1229 mutex_init(&tx_ring->tx_lock, NULL,
1230 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1231 mutex_init(&tx_ring->usedlist_lock, NULL,
1232 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1233 mutex_init(&tx_ring->freelist_lock, NULL,
1234 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1236 rx_ring = Adapter->rx_ring;
1238 mutex_init(&rx_ring->rx_lock, NULL,
1239 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1241 mutex_init(&Adapter->e1000g_led_lock, NULL,
1242 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1245 static void
1246 e1000g_destroy_locks(struct e1000g *Adapter)
1248 e1000g_tx_ring_t *tx_ring;
1249 e1000g_rx_ring_t *rx_ring;
1251 mutex_destroy(&Adapter->e1000g_led_lock);
1253 tx_ring = Adapter->tx_ring;
1254 mutex_destroy(&tx_ring->tx_lock);
1255 mutex_destroy(&tx_ring->usedlist_lock);
1256 mutex_destroy(&tx_ring->freelist_lock);
1258 rx_ring = Adapter->rx_ring;
1259 mutex_destroy(&rx_ring->rx_lock);
1261 mutex_destroy(&Adapter->link_lock);
1262 mutex_destroy(&Adapter->watchdog_lock);
1263 rw_destroy(&Adapter->chip_lock);
	/* destroy mutex initialized in shared code */
1266 e1000_destroy_hw_mutex(&Adapter->shared);
1269 static int
1270 e1000g_resume(dev_info_t *devinfo)
1272 struct e1000g *Adapter;
1274 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1275 if (Adapter == NULL)
1276 e1000g_log(Adapter, CE_PANIC,
1277 "Instance pointer is null\n");
1279 if (Adapter->dip != devinfo)
1280 e1000g_log(Adapter, CE_PANIC,
1281 "Devinfo is not the same as saved devinfo\n");
1283 rw_enter(&Adapter->chip_lock, RW_WRITER);
1285 if (Adapter->e1000g_state & E1000G_STARTED) {
1286 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
1287 rw_exit(&Adapter->chip_lock);
1289 * We note the failure, but return success, as the
1290 * system is still usable without this controller.
1292 e1000g_log(Adapter, CE_WARN,
1293 "e1000g_resume: failed to restart controller\n");
1294 return (DDI_SUCCESS);
1296 /* Enable and start the watchdog timer */
1297 enable_watchdog_timer(Adapter);
1300 Adapter->e1000g_state &= ~E1000G_SUSPENDED;
1302 rw_exit(&Adapter->chip_lock);
1304 return (DDI_SUCCESS);
1307 static int
1308 e1000g_suspend(dev_info_t *devinfo)
1310 struct e1000g *Adapter;
1312 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1313 if (Adapter == NULL)
1314 return (DDI_FAILURE);
1316 rw_enter(&Adapter->chip_lock, RW_WRITER);
1318 Adapter->e1000g_state |= E1000G_SUSPENDED;
1320 /* if the port isn't plumbed, we can simply return */
1321 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
1322 rw_exit(&Adapter->chip_lock);
1323 return (DDI_SUCCESS);
1326 e1000g_stop(Adapter, B_FALSE);
1328 rw_exit(&Adapter->chip_lock);
1330 /* Disable and stop all the timers */
1331 disable_watchdog_timer(Adapter);
1332 stop_link_timer(Adapter);
1333 stop_82547_timer(Adapter->tx_ring);
1335 return (DDI_SUCCESS);
1338 static int
1339 e1000g_init(struct e1000g *Adapter)
1341 uint32_t pba;
1342 uint32_t high_water;
1343 struct e1000_hw *hw;
1344 clock_t link_timeout;
1345 int result;
1347 hw = &Adapter->shared;
1350 * reset to put the hardware in a known state
1351 * before we try to do anything with the eeprom
1353 mutex_enter(&e1000g_nvm_lock);
1354 result = e1000_reset_hw(hw);
1355 mutex_exit(&e1000g_nvm_lock);
1357 if (result != E1000_SUCCESS) {
1358 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1359 goto init_fail;
1362 mutex_enter(&e1000g_nvm_lock);
1363 result = e1000_validate_nvm_checksum(hw);
1364 if (result < E1000_SUCCESS) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state. Call it again;
		 * if it fails a second time it's a real issue.
		 */
1370 result = e1000_validate_nvm_checksum(hw);
1372 mutex_exit(&e1000g_nvm_lock);
1374 if (result < E1000_SUCCESS) {
1375 e1000g_log(Adapter, CE_WARN,
1376 "Invalid NVM checksum. Please contact "
1377 "the vendor to update the NVM.");
1378 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1379 goto init_fail;
1382 result = 0;
1383 /* Get the local ethernet address. */
1384 if (!result) {
1385 mutex_enter(&e1000g_nvm_lock);
1386 result = e1000_read_mac_addr(hw);
1387 mutex_exit(&e1000g_nvm_lock);
1390 if (result < E1000_SUCCESS) {
1391 e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
1392 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1393 goto init_fail;
1396 /* check for valid mac address */
1397 if (!is_valid_mac_addr(hw->mac.addr)) {
1398 e1000g_log(Adapter, CE_WARN, "Invalid mac addr");
1399 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1400 goto init_fail;
1403 /* Set LAA state for 82571 chipset */
1404 e1000_set_laa_state_82571(hw, B_TRUE);
1406 /* Master Latency Timer implementation */
1407 if (Adapter->master_latency_timer) {
1408 pci_config_put8(Adapter->osdep.cfg_handle,
1409 PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer);
1412 if (hw->mac.type < e1000_82547) {
1414 * Total FIFO is 64K
1416 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1417 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1418 else
1419 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1420 } else if ((hw->mac.type == e1000_82571) ||
1421 (hw->mac.type == e1000_82572) ||
1422 (hw->mac.type == e1000_80003es2lan)) {
1424 * Total FIFO is 48K
1426 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1427 pba = E1000_PBA_30K; /* 30K for Rx, 18K for Tx */
1428 else
1429 pba = E1000_PBA_38K; /* 38K for Rx, 10K for Tx */
1430 } else if (hw->mac.type == e1000_82573) {
1431 pba = E1000_PBA_20K; /* 20K for Rx, 12K for Tx */
1432 } else if (hw->mac.type == e1000_82574) {
1433 /* Keep adapter default: 20K for Rx, 20K for Tx */
1434 pba = E1000_READ_REG(hw, E1000_PBA);
1435 } else if (hw->mac.type == e1000_ich8lan) {
1436 pba = E1000_PBA_8K; /* 8K for Rx, 12K for Tx */
1437 } else if (hw->mac.type == e1000_ich9lan) {
1438 pba = E1000_PBA_10K;
1439 } else if (hw->mac.type == e1000_ich10lan) {
1440 pba = E1000_PBA_10K;
1441 } else if (hw->mac.type == e1000_pchlan) {
1442 pba = E1000_PBA_26K;
1443 } else if (hw->mac.type == e1000_pch2lan) {
1444 pba = E1000_PBA_26K;
1445 } else if (hw->mac.type == e1000_pch_lpt) {
1446 pba = E1000_PBA_26K;
1447 } else if (hw->mac.type == e1000_pch_spt) {
1448 pba = E1000_PBA_26K;
1449 } else if (hw->mac.type == e1000_pch_cnp) {
1450 pba = E1000_PBA_26K;
1451 } else {
1453 * Total FIFO is 40K
1455 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1456 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1457 else
1458 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
1460 E1000_WRITE_REG(hw, E1000_PBA, pba);
	/*
	 * These parameters set thresholds for the adapter's generation (Tx)
	 * and response (Rx) to Ethernet PAUSE frames. These are just
	 * threshold settings. Flow control is enabled or disabled in the
	 * configuration file.
	 * High-water mark is set down from the top of the rx fifo (not
	 * sensitive to max_frame_size) and low-water is set just below
	 * high-water mark.
	 * The high water mark must be low enough to fit one full frame above
	 * it in the rx FIFO. Should be the lower of:
	 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early
	 * receive size (assuming ERT set to E1000_ERT_2048), or the full
	 * Rx FIFO size minus one full frame.
	 */
1476 high_water = min(((pba << 10) * 9 / 10),
1477 ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 ||
1478 hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ?
1479 ((pba << 10) - (E1000_ERT_2048 << 3)) :
1480 ((pba << 10) - Adapter->max_frame_size)));
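	/*
	 * pba is in kilobytes, so (pba << 10) is the Rx FIFO size in bytes.
	 * The high-water mark is rounded down to an 8-byte boundary and the
	 * low-water mark sits 8 bytes below it.
	 */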
1482 hw->fc.high_water = high_water & 0xFFF8;
1483 hw->fc.low_water = hw->fc.high_water - 8;
1485 if (hw->mac.type == e1000_80003es2lan)
1486 hw->fc.pause_time = 0xFFFF;
1487 else
1488 hw->fc.pause_time = E1000_FC_PAUSE_TIME;
1489 hw->fc.send_xon = B_TRUE;
1492 * Reset the adapter hardware the second time.
1494 mutex_enter(&e1000g_nvm_lock);
1495 result = e1000_reset_hw(hw);
1496 mutex_exit(&e1000g_nvm_lock);
1498 if (result != E1000_SUCCESS) {
1499 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1500 goto init_fail;
1503 /* disable wakeup control by default */
1504 if (hw->mac.type >= e1000_82544)
1505 E1000_WRITE_REG(hw, E1000_WUC, 0);
1508 * MWI should be disabled on 82546.
1510 if (hw->mac.type == e1000_82546)
1511 e1000_pci_clear_mwi(hw);
1512 else
1513 e1000_pci_set_mwi(hw);
1516 * Configure/Initialize hardware
1518 mutex_enter(&e1000g_nvm_lock);
1519 result = e1000_init_hw(hw);
1520 mutex_exit(&e1000g_nvm_lock);
1522 if (result < E1000_SUCCESS) {
1523 e1000g_log(Adapter, CE_WARN, "Initialize hw failed");
1524 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1525 goto init_fail;
1529 * Restore LED settings to the default from EEPROM
1530 * to meet the standard for Sun platforms.
1532 (void) e1000_cleanup_led(hw);
1534 /* Disable Smart Power Down */
1535 phy_spd_state(hw, B_FALSE);
1537 /* Make sure driver has control */
1538 e1000g_get_driver_control(hw);
1541 * Initialize unicast addresses.
1543 e1000g_init_unicst(Adapter);
1546 * Setup and initialize the mctable structures. After this routine
1547 * completes Multicast table will be set
1549 e1000_update_mc_addr_list(hw,
1550 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
1551 msec_delay(5);
1554 * Implement Adaptive IFS
1556 e1000_reset_adaptive(hw);
1558 /* Setup Interrupt Throttling Register */
1559 if (hw->mac.type >= e1000_82540) {
1560 E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate);
1561 } else
1562 Adapter->intr_adaptive = B_FALSE;
1564 /* Start the timer for link setup */
1565 if (hw->mac.autoneg)
1566 link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000);
1567 else
1568 link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000);
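	/*
	 * drv_usectohz(100000) converts 100 ms into clock ticks, so the
	 * timeout above is the PHY limit expressed in units of 100 ms.
	 */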
1570 mutex_enter(&Adapter->link_lock);
1571 if (hw->phy.autoneg_wait_to_complete) {
1572 Adapter->link_complete = B_TRUE;
1573 } else {
1574 Adapter->link_complete = B_FALSE;
1575 Adapter->link_tid = timeout(e1000g_link_timer,
1576 (void *)Adapter, link_timeout);
1578 mutex_exit(&Adapter->link_lock);
1580 /* Save the state of the phy */
1581 e1000g_get_phy_state(Adapter);
1583 e1000g_param_sync(Adapter);
1585 Adapter->init_count++;
1587 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
1588 goto init_fail;
1590 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1591 goto init_fail;
1594 Adapter->poll_mode = e1000g_poll_mode;
1596 return (DDI_SUCCESS);
1598 init_fail:
1599 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1600 return (DDI_FAILURE);
1603 static int
1604 e1000g_alloc_rx_data(struct e1000g *Adapter)
1606 e1000g_rx_ring_t *rx_ring;
1607 e1000g_rx_data_t *rx_data;
1609 rx_ring = Adapter->rx_ring;
1611 rx_data = kmem_zalloc(sizeof (e1000g_rx_data_t), KM_NOSLEEP);
1613 if (rx_data == NULL)
1614 return (DDI_FAILURE);
1616 rx_data->priv_devi_node = Adapter->priv_devi_node;
1617 rx_data->rx_ring = rx_ring;
1619 mutex_init(&rx_data->freelist_lock, NULL,
1620 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1621 mutex_init(&rx_data->recycle_lock, NULL,
1622 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1624 rx_ring->rx_data = rx_data;
1626 return (DDI_SUCCESS);
1629 void
1630 e1000g_free_rx_pending_buffers(e1000g_rx_data_t *rx_data)
1632 rx_sw_packet_t *packet, *next_packet;
1634 if (rx_data == NULL)
1635 return;
1637 packet = rx_data->packet_area;
1638 while (packet != NULL) {
1639 next_packet = packet->next;
1640 e1000g_free_rx_sw_packet(packet, B_TRUE);
1641 packet = next_packet;
1643 rx_data->packet_area = NULL;
1646 void
1647 e1000g_free_rx_data(e1000g_rx_data_t *rx_data)
1649 if (rx_data == NULL)
1650 return;
1652 mutex_destroy(&rx_data->freelist_lock);
1653 mutex_destroy(&rx_data->recycle_lock);
1655 kmem_free(rx_data, sizeof (e1000g_rx_data_t));
1659 * Check if the link is up
1661 static boolean_t
1662 e1000g_link_up(struct e1000g *Adapter)
1664 struct e1000_hw *hw = &Adapter->shared;
1665 boolean_t link_up = B_FALSE;
	/*
	 * get_link_status is set in the interrupt handler on
	 * link-status-change or rx sequence error interrupt.
	 * get_link_status will stay false until e1000_check_for_link
	 * establishes link, for copper adapters only.
	 */
1673 switch (hw->phy.media_type) {
1674 case e1000_media_type_copper:
1675 if (hw->mac.get_link_status) {
1677 * SPT and newer devices need a bit of extra time before
1678 * we ask them.
1680 if (hw->mac.type >= e1000_pch_spt)
1681 msec_delay(50);
1682 (void) e1000_check_for_link(hw);
1683 if ((E1000_READ_REG(hw, E1000_STATUS) &
1684 E1000_STATUS_LU)) {
1685 link_up = B_TRUE;
1686 } else {
1687 link_up = !hw->mac.get_link_status;
1689 } else {
1690 link_up = B_TRUE;
1692 break;
1693 case e1000_media_type_fiber:
1694 (void) e1000_check_for_link(hw);
1695 link_up = (E1000_READ_REG(hw, E1000_STATUS) &
1696 E1000_STATUS_LU);
1697 break;
1698 case e1000_media_type_internal_serdes:
1699 (void) e1000_check_for_link(hw);
1700 link_up = hw->mac.serdes_has_link;
1701 break;
1704 return (link_up);
1707 static void
1708 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
1710 struct iocblk *iocp;
1711 struct e1000g *e1000gp;
1712 enum ioc_reply status;
1714 iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
1715 iocp->ioc_error = 0;
1716 e1000gp = (struct e1000g *)arg;
1718 ASSERT(e1000gp);
1719 if (e1000gp == NULL) {
1720 miocnak(q, mp, 0, EINVAL);
1721 return;
1724 rw_enter(&e1000gp->chip_lock, RW_READER);
1725 if (e1000gp->e1000g_state & E1000G_SUSPENDED) {
1726 rw_exit(&e1000gp->chip_lock);
1727 miocnak(q, mp, 0, EINVAL);
1728 return;
1730 rw_exit(&e1000gp->chip_lock);
1732 switch (iocp->ioc_cmd) {
1734 case LB_GET_INFO_SIZE:
1735 case LB_GET_INFO:
1736 case LB_GET_MODE:
1737 case LB_SET_MODE:
1738 status = e1000g_loopback_ioctl(e1000gp, iocp, mp);
1739 break;
1742 #ifdef E1000G_DEBUG
1743 case E1000G_IOC_REG_PEEK:
1744 case E1000G_IOC_REG_POKE:
1745 status = e1000g_pp_ioctl(e1000gp, iocp, mp);
1746 break;
1747 case E1000G_IOC_CHIP_RESET:
1748 e1000gp->reset_count++;
1749 if (e1000g_reset_adapter(e1000gp))
1750 status = IOC_ACK;
1751 else
1752 status = IOC_INVAL;
1753 break;
1754 #endif
1755 default:
1756 status = IOC_INVAL;
1757 break;
1761 * Decide how to reply
1763 switch (status) {
1764 default:
1765 case IOC_INVAL:
1767 * Error, reply with a NAK and EINVAL or the specified error
1769 miocnak(q, mp, 0, iocp->ioc_error == 0 ?
1770 EINVAL : iocp->ioc_error);
1771 break;
1773 case IOC_DONE:
1775 * OK, reply already sent
1777 break;
1779 case IOC_ACK:
1781 * OK, reply with an ACK
1783 miocack(q, mp, 0, 0);
1784 break;
1786 case IOC_REPLY:
1788 * OK, send prepared reply as ACK or NAK
1790 mp->b_datap->db_type = iocp->ioc_error == 0 ?
1791 M_IOCACK : M_IOCNAK;
1792 qreply(q, mp);
		break;
	}
}

/*
 * The default value of e1000g_poll_mode == 0 assumes that the NIC is
 * capable of supporting only one interrupt and we shouldn't disable
 * the physical interrupt. In this case we let the interrupt come and
 * we queue the packets in the rx ring itself in case we are in polling
 * mode (better latency but slightly lower performance and a very
 * high interrupt count in mpstat, which is harmless).
 *
 * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt
 * which can be disabled in poll mode. This gives better overall
 * throughput (compared to the mode above), shows very low interrupt
 * count but has slightly higher latency since we pick the packets when
 * the poll thread does polling.
 *
 * Currently, this flag should be enabled only while doing performance
 * measurement or when it can be guaranteed that the entire NIC going
 * into poll mode will not harm any traffic like cluster heartbeat etc.
 */
1815 int e1000g_poll_mode = 0;
/*
 * Called from the upper layers when driver is in polling mode to
 * pick up any queued packets. Care should be taken to not block
 * this thread.
 */
static mblk_t *
e1000g_poll_ring(void *arg, int bytes_to_pickup)
{
1824 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)arg;
1825 mblk_t *mp = NULL;
1826 mblk_t *tail;
1827 struct e1000g *adapter;
1829 adapter = rx_ring->adapter;
1831 rw_enter(&adapter->chip_lock, RW_READER);
1833 if (adapter->e1000g_state & E1000G_SUSPENDED) {
1834 rw_exit(&adapter->chip_lock);
1835 return (NULL);
1838 mutex_enter(&rx_ring->rx_lock);
1839 mp = e1000g_receive(rx_ring, &tail, bytes_to_pickup);
1840 mutex_exit(&rx_ring->rx_lock);
1841 rw_exit(&adapter->chip_lock);
1842 return (mp);
1845 static int
1846 e1000g_m_start(void *arg)
1848 struct e1000g *Adapter = (struct e1000g *)arg;
1850 rw_enter(&Adapter->chip_lock, RW_WRITER);
1852 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
1853 rw_exit(&Adapter->chip_lock);
1854 return (ECANCELED);
1857 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
1858 rw_exit(&Adapter->chip_lock);
1859 return (ENOTACTIVE);
1862 Adapter->e1000g_state |= E1000G_STARTED;
1864 rw_exit(&Adapter->chip_lock);
1866 /* Enable and start the watchdog timer */
1867 enable_watchdog_timer(Adapter);
1869 return (0);
1872 static int
1873 e1000g_start(struct e1000g *Adapter, boolean_t global)
1875 e1000g_rx_data_t *rx_data;
1877 if (global) {
1878 if (e1000g_alloc_rx_data(Adapter) != DDI_SUCCESS) {
1879 e1000g_log(Adapter, CE_WARN, "Allocate rx data failed");
1880 goto start_fail;
1883 /* Allocate dma resources for descriptors and buffers */
1884 if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) {
1885 e1000g_log(Adapter, CE_WARN,
1886 "Alloc DMA resources failed");
1887 goto start_fail;
1889 Adapter->rx_buffer_setup = B_FALSE;
1892 if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) {
1893 if (e1000g_init(Adapter) != DDI_SUCCESS) {
1894 e1000g_log(Adapter, CE_WARN,
1895 "Adapter initialization failed");
1896 goto start_fail;
1900 /* Setup and initialize the transmit structures */
1901 e1000g_tx_setup(Adapter);
1902 msec_delay(5);
1904 /* Setup and initialize the receive structures */
1905 e1000g_rx_setup(Adapter);
1906 msec_delay(5);
1908 /* Restore the e1000g promiscuous mode */
1909 e1000g_restore_promisc(Adapter);
1911 e1000g_mask_interrupt(Adapter);
1913 Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
1915 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1916 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1917 goto start_fail;
1920 return (DDI_SUCCESS);
1922 start_fail:
1923 rx_data = Adapter->rx_ring->rx_data;
1925 if (global) {
1926 e1000g_release_dma_resources(Adapter);
1927 e1000g_free_rx_pending_buffers(rx_data);
1928 e1000g_free_rx_data(rx_data);
1931 mutex_enter(&e1000g_nvm_lock);
1932 (void) e1000_reset_hw(&Adapter->shared);
1933 mutex_exit(&e1000g_nvm_lock);
	return (DDI_FAILURE);
}

/*
 * The I219 has the curious property that if the descriptor rings are not
 * emptied before resetting the hardware or before changing the device state
 * based on runtime power management, it'll cause the card to hang. This can
 * then only be fixed by a PCI reset. As such, for the I219 and it alone, we
 * have to flush the rings if we're in this state.
 */
static void
e1000g_flush_desc_rings(struct e1000g *Adapter)
{
1948 struct e1000_hw *hw = &Adapter->shared;
1949 u16 hang_state;
1950 u32 fext_nvm11, tdlen;
1952 /* First, disable MULR fix in FEXTNVM11 */
1953 fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
1954 fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
1955 E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11);
1957 /* do nothing if we're not in faulty state, or if the queue is empty */
1958 tdlen = E1000_READ_REG(hw, E1000_TDLEN(0));
1959 hang_state = pci_config_get16(Adapter->osdep.cfg_handle,
1960 PCICFG_DESC_RING_STATUS);
1961 if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
1962 return;
1963 e1000g_flush_tx_ring(Adapter);
1965 /* recheck, maybe the fault is caused by the rx ring */
1966 hang_state = pci_config_get16(Adapter->osdep.cfg_handle,
1967 PCICFG_DESC_RING_STATUS);
1968 if (hang_state & FLUSH_DESC_REQUIRED)
1969 e1000g_flush_rx_ring(Adapter);
1973 static void
1974 e1000g_m_stop(void *arg)
1976 struct e1000g *Adapter = (struct e1000g *)arg;
1978 /* Drain tx sessions */
1979 (void) e1000g_tx_drain(Adapter);
1981 rw_enter(&Adapter->chip_lock, RW_WRITER);
1983 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
1984 rw_exit(&Adapter->chip_lock);
1985 return;
1987 Adapter->e1000g_state &= ~E1000G_STARTED;
1988 e1000g_stop(Adapter, B_TRUE);
1990 rw_exit(&Adapter->chip_lock);
1992 /* Disable and stop all the timers */
1993 disable_watchdog_timer(Adapter);
1994 stop_link_timer(Adapter);
1995 stop_82547_timer(Adapter->tx_ring);
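/*
 * e1000g_stop - counterpart of e1000g_start.  The chip is reset and tx/rx
 * state is reclaimed; with 'global' set the rx DMA resources are also torn
 * down, which is deferred when receive buffers are still loaned upstream
 * (rx_data->pending_count != 0).
 */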
1998 static void
1999 e1000g_stop(struct e1000g *Adapter, boolean_t global)
2001 private_devi_list_t *devi_node;
2002 e1000g_rx_data_t *rx_data;
2003 int result;
2005 Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT;
2007 /* Stop the chip and release pending resources */
2009 /* Tell firmware driver is no longer in control */
2010 e1000g_release_driver_control(&Adapter->shared);
2012 e1000g_clear_all_interrupts(Adapter);
2014 mutex_enter(&e1000g_nvm_lock);
2015 result = e1000_reset_hw(&Adapter->shared);
2016 mutex_exit(&e1000g_nvm_lock);
2018 if (result != E1000_SUCCESS) {
2019 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
2020 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
2023 mutex_enter(&Adapter->link_lock);
2024 Adapter->link_complete = B_FALSE;
2025 mutex_exit(&Adapter->link_lock);
2027 /* Release resources still held by the TX descriptors */
2028 e1000g_tx_clean(Adapter);
2030 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2031 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
2033 /* Clean the pending rx jumbo packet fragment */
2034 e1000g_rx_clean(Adapter);
2037 * The I219, e.g. the pch_spt, has bugs such that we must ensure that
2038 * rings are flushed before we do anything else. This must be done
2039 * before we release DMA resources.
2041 if (Adapter->shared.mac.type >= e1000_pch_spt)
2042 e1000g_flush_desc_rings(Adapter);
2044 if (global) {
2045 e1000g_release_dma_resources(Adapter);
2047 mutex_enter(&e1000g_rx_detach_lock);
2048 rx_data = Adapter->rx_ring->rx_data;
2049 rx_data->flag |= E1000G_RX_STOPPED;
2051 if (rx_data->pending_count == 0) {
2052 e1000g_free_rx_pending_buffers(rx_data);
2053 e1000g_free_rx_data(rx_data);
2054 } else {
2055 devi_node = rx_data->priv_devi_node;
2056 if (devi_node != NULL)
2057 atomic_inc_32(&devi_node->pending_rx_count);
2058 else
2059 atomic_inc_32(&Adapter->pending_rx_count);
2061 mutex_exit(&e1000g_rx_detach_lock);
2064 if (Adapter->link_state != LINK_STATE_UNKNOWN) {
2065 Adapter->link_state = LINK_STATE_UNKNOWN;
2066 if (!Adapter->reset_flag)
2067 mac_link_update(Adapter->mh, Adapter->link_state);
2071 static void
2072 e1000g_rx_clean(struct e1000g *Adapter)
2074 e1000g_rx_data_t *rx_data = Adapter->rx_ring->rx_data;
2076 if (rx_data == NULL)
2077 return;
2079 if (rx_data->rx_mblk != NULL) {
2080 freemsg(rx_data->rx_mblk);
2081 rx_data->rx_mblk = NULL;
2082 rx_data->rx_mblk_tail = NULL;
2083 rx_data->rx_mblk_len = 0;
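/*
 * e1000g_tx_clean - reclaim every tx software packet still on the used
 * list, free any mblks attached to them, and reset the software and
 * hardware descriptor pointers back to the start of the ring.
 */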
2087 static void
2088 e1000g_tx_clean(struct e1000g *Adapter)
2090 e1000g_tx_ring_t *tx_ring;
2091 p_tx_sw_packet_t packet;
2092 mblk_t *mp;
2093 mblk_t *nmp;
2094 uint32_t packet_count;
2096 tx_ring = Adapter->tx_ring;
2099 * Here we don't need to protect the lists with
2100 * the usedlist_lock and freelist_lock, because they
2101 * are already protected by the chip_lock.
2103 mp = NULL;
2104 nmp = NULL;
2105 packet_count = 0;
2106 packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list);
2107 while (packet != NULL) {
2108 if (packet->mp != NULL) {
2109 /* Assemble the message chain */
2110 if (mp == NULL) {
2111 mp = packet->mp;
2112 nmp = packet->mp;
2113 } else {
2114 nmp->b_next = packet->mp;
2115 nmp = packet->mp;
2117 /* Disconnect the message from the sw packet */
2118 packet->mp = NULL;
2121 e1000g_free_tx_swpkt(packet);
2122 packet_count++;
2124 packet = (p_tx_sw_packet_t)
2125 QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link);
2128 if (mp != NULL)
2129 freemsgchain(mp);
2131 if (packet_count > 0) {
2132 QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list);
2133 QUEUE_INIT_LIST(&tx_ring->used_list);
2135 /* Setup TX descriptor pointers */
2136 tx_ring->tbd_next = tx_ring->tbd_first;
2137 tx_ring->tbd_oldest = tx_ring->tbd_first;
2139 /* Setup our HW Tx Head & Tail descriptor pointers */
2140 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
2141 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
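/*
 * e1000g_tx_drain - poll the used list once per millisecond, for up to
 * TX_DRAIN_TIME milliseconds, waiting for pending transmits to complete.
 * Returns B_TRUE if the list emptied in time, B_FALSE otherwise.
 */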
2145 static boolean_t
2146 e1000g_tx_drain(struct e1000g *Adapter)
2148 int i;
2149 boolean_t done;
2150 e1000g_tx_ring_t *tx_ring;
2152 tx_ring = Adapter->tx_ring;
2154 /* Allow up to TX_DRAIN_TIME (in milliseconds) for pending xmits to complete. */
2155 for (i = 0; i < TX_DRAIN_TIME; i++) {
2156 mutex_enter(&tx_ring->usedlist_lock);
2157 done = IS_QUEUE_EMPTY(&tx_ring->used_list);
2158 mutex_exit(&tx_ring->usedlist_lock);
2160 if (done)
2161 break;
2163 msec_delay(1);
2166 return (done);
2169 static boolean_t
2170 e1000g_rx_drain(struct e1000g *Adapter)
2172 int i;
2173 boolean_t done;
2176 * Allow up to RX_DRAIN_TIME for pending received packets to complete.
2178 for (i = 0; i < RX_DRAIN_TIME; i++) {
2179 done = (Adapter->pending_rx_count == 0);
2181 if (done)
2182 break;
2184 msec_delay(1);
2187 return (done);
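/*
 * e1000g_reset_adapter - local (non-global) reset: stop and restart the
 * chip without reallocating DMA resources.  A pending stall_flag is
 * converted into reset_flag so the link-up path knows a reset occurred.
 */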
2190 static boolean_t
2191 e1000g_reset_adapter(struct e1000g *Adapter)
2193 /* Disable and stop all the timers */
2194 disable_watchdog_timer(Adapter);
2195 stop_link_timer(Adapter);
2196 stop_82547_timer(Adapter->tx_ring);
2198 rw_enter(&Adapter->chip_lock, RW_WRITER);
2200 if (Adapter->stall_flag) {
2201 Adapter->stall_flag = B_FALSE;
2202 Adapter->reset_flag = B_TRUE;
2205 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2206 rw_exit(&Adapter->chip_lock);
2207 return (B_TRUE);
2210 e1000g_stop(Adapter, B_FALSE);
2212 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
2213 rw_exit(&Adapter->chip_lock);
2214 e1000g_log(Adapter, CE_WARN, "Reset failed");
2215 return (B_FALSE);
2218 rw_exit(&Adapter->chip_lock);
2220 /* Enable and start the watchdog timer */
2221 enable_watchdog_timer(Adapter);
2223 return (B_TRUE);
2226 boolean_t
2227 e1000g_global_reset(struct e1000g *Adapter)
2229 /* Disable and stop all the timers */
2230 disable_watchdog_timer(Adapter);
2231 stop_link_timer(Adapter);
2232 stop_82547_timer(Adapter->tx_ring);
2234 rw_enter(&Adapter->chip_lock, RW_WRITER);
2236 e1000g_stop(Adapter, B_TRUE);
2238 Adapter->init_count = 0;
2240 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
2241 rw_exit(&Adapter->chip_lock);
2242 e1000g_log(Adapter, CE_WARN, "Reset failed");
2243 return (B_FALSE);
2246 rw_exit(&Adapter->chip_lock);
2248 /* Enable and start the watchdog timer */
2249 enable_watchdog_timer(Adapter);
2251 return (B_TRUE);
2255 * e1000g_intr_pciexpress - ISR for PCI Express chipsets
2257 * This interrupt service routine is for PCI-Express adapters.
2258 * The ICR contents are valid only when the E1000_ICR_INT_ASSERTED
2259 * bit is set.
2261 static uint_t
2262 e1000g_intr_pciexpress(caddr_t arg)
2264 struct e1000g *Adapter;
2265 uint32_t icr;
2267 Adapter = (struct e1000g *)(uintptr_t)arg;
2268 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2270 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2271 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2272 return (DDI_INTR_CLAIMED);
2275 if (icr & E1000_ICR_INT_ASSERTED) {
2277 * E1000_ICR_INT_ASSERTED bit was set:
2278 * Read(Clear) the ICR, claim this interrupt,
2279 * look for work to do.
2281 e1000g_intr_work(Adapter, icr);
2282 return (DDI_INTR_CLAIMED);
2283 } else {
2285 * E1000_ICR_INT_ASSERTED bit was not set:
2286 * Don't claim this interrupt, return immediately.
2288 return (DDI_INTR_UNCLAIMED);
2293 * e1000g_intr - ISR for PCI/PCI-X chipsets
2295 * This interrupt service routine is for PCI/PCI-X adapters.
2296 * We check the ICR contents regardless of whether the E1000_ICR_INT_ASSERTED
2297 * bit is set.
2299 static uint_t
2300 e1000g_intr(caddr_t arg)
2302 struct e1000g *Adapter;
2303 uint32_t icr;
2305 Adapter = (struct e1000g *)(uintptr_t)arg;
2306 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2308 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2309 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2310 return (DDI_INTR_CLAIMED);
2313 if (icr) {
2315 * Any bit was set in ICR:
2316 * Read(Clear) the ICR, claim this interrupt,
2317 * look for work to do.
2319 e1000g_intr_work(Adapter, icr);
2320 return (DDI_INTR_CLAIMED);
2321 } else {
2323 * No bit was set in ICR:
2324 * Don't claim this interrupt, return immediately.
2326 return (DDI_INTR_UNCLAIMED);
2331 * e1000g_intr_work - actual processing of ISR
2333 * Read(clear) the ICR contents and call appropriate interrupt
2334 * processing routines.
2336 static void
2337 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr)
2339 struct e1000_hw *hw;
2340 hw = &Adapter->shared;
2341 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
2343 Adapter->rx_pkt_cnt = 0;
2344 Adapter->tx_pkt_cnt = 0;
2346 rw_enter(&Adapter->chip_lock, RW_READER);
2348 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2349 rw_exit(&Adapter->chip_lock);
2350 return;
2353 * Here we need to check the "e1000g_state" flag within the chip_lock to
2354 * ensure the receive routine will not execute when the adapter is
2355 * being reset.
2357 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2358 rw_exit(&Adapter->chip_lock);
2359 return;
2362 if (icr & E1000_ICR_RXT0) {
2363 mblk_t *mp = NULL;
2364 mblk_t *tail = NULL;
2365 e1000g_rx_ring_t *rx_ring;
2367 rx_ring = Adapter->rx_ring;
2368 mutex_enter(&rx_ring->rx_lock);
2370 * Sometimes with legacy interrupts, it is possible that
2371 * a single interrupt covers both Rx and Tx. In that
2372 * case, if the poll flag is set, we shouldn't really
2373 * be doing Rx processing.
2375 if (!rx_ring->poll_flag)
2376 mp = e1000g_receive(rx_ring, &tail,
2377 E1000G_CHAIN_NO_LIMIT);
2378 mutex_exit(&rx_ring->rx_lock);
2379 rw_exit(&Adapter->chip_lock);
2380 if (mp != NULL)
2381 mac_rx_ring(Adapter->mh, rx_ring->mrh,
2382 mp, rx_ring->ring_gen_num);
2383 } else
2384 rw_exit(&Adapter->chip_lock);
2386 if (icr & E1000_ICR_TXDW) {
2387 if (!Adapter->tx_intr_enable)
2388 e1000g_clear_tx_interrupt(Adapter);
2390 /* Recycle the tx descriptors */
2391 rw_enter(&Adapter->chip_lock, RW_READER);
2392 (void) e1000g_recycle(tx_ring);
2393 E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr);
2394 rw_exit(&Adapter->chip_lock);
2396 if (tx_ring->resched_needed &&
2397 (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) {
2398 tx_ring->resched_needed = B_FALSE;
2399 mac_tx_update(Adapter->mh);
2400 E1000G_STAT(tx_ring->stat_reschedule);
2405 * The Receive Sequence errors RXSEQ and the link status change LSC
2406 * are checked to detect that the cable has been pulled out. For
2407 * the Wiseman 2.0 silicon, the receive sequence error interrupt
2408 * is an indication that the cable is not connected.
2410 if ((icr & E1000_ICR_RXSEQ) ||
2411 (icr & E1000_ICR_LSC) ||
2412 (icr & E1000_ICR_GPI_EN1)) {
2413 boolean_t link_changed;
2414 timeout_id_t tid = 0;
2416 stop_watchdog_timer(Adapter);
2418 rw_enter(&Adapter->chip_lock, RW_WRITER);
2421 * Because we got a link-status-change interrupt, force
2422 * e1000_check_for_link() to look at phy
2424 Adapter->shared.mac.get_link_status = B_TRUE;
2426 /* e1000g_link_check takes care of link status change */
2427 link_changed = e1000g_link_check(Adapter);
2429 /* Get new phy state */
2430 e1000g_get_phy_state(Adapter);
2433 * If the link timer has not timed out, we'll not notify
2434 * the upper layer of any link state change until the link is up.
2436 if (link_changed && !Adapter->link_complete) {
2437 if (Adapter->link_state == LINK_STATE_UP) {
2438 mutex_enter(&Adapter->link_lock);
2439 Adapter->link_complete = B_TRUE;
2440 tid = Adapter->link_tid;
2441 Adapter->link_tid = 0;
2442 mutex_exit(&Adapter->link_lock);
2443 } else {
2444 link_changed = B_FALSE;
2447 rw_exit(&Adapter->chip_lock);
2449 if (link_changed) {
2450 if (tid != 0)
2451 (void) untimeout(tid);
2454 * Workaround for esb2. Data stuck in fifo on a link
2455 * down event. Stop receiver here and reset in watchdog.
2457 if ((Adapter->link_state == LINK_STATE_DOWN) &&
2458 (Adapter->shared.mac.type == e1000_80003es2lan)) {
2459 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
2460 E1000_WRITE_REG(hw, E1000_RCTL,
2461 rctl & ~E1000_RCTL_EN);
2462 e1000g_log(Adapter, CE_WARN,
2463 "ESB2 receiver disabled");
2464 Adapter->esb2_workaround = B_TRUE;
2466 if (!Adapter->reset_flag)
2467 mac_link_update(Adapter->mh,
2468 Adapter->link_state);
2469 if (Adapter->link_state == LINK_STATE_UP)
2470 Adapter->reset_flag = B_FALSE;
2473 start_watchdog_timer(Adapter);
2477 static void
2478 e1000g_init_unicst(struct e1000g *Adapter)
2480 struct e1000_hw *hw;
2481 int slot;
2483 hw = &Adapter->shared;
2485 if (Adapter->init_count == 0) {
2486 /* Initialize the multiple unicast addresses */
2487 Adapter->unicst_total = min(hw->mac.rar_entry_count,
2488 MAX_NUM_UNICAST_ADDRESSES);
2491 * The common code does not correctly calculate the number of
2492 * RARs that could be reserved by firmware for the pch_lpt and
2493 * pch_spt MACs. The interface has one primary RAR, and 11
2494 * additional ones. Those 11 additional ones are not always
2495 * available. According to the datasheet, we need to check a
2496 * few of the bits set in the FWSM register. If the value is
2497 * zero, everything is available. If the value is 1, none of the
2498 * additional registers are available. If the value is 2-7, only
2499 * that many additional registers are available.
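 *
 * (Illustrative reading of the code below: WLOCK_MAC == 0 yields
 * 1 + 11 = 12 usable RARs, WLOCK_MAC == 1 yields only the primary
 * RAR, and WLOCK_MAC == n for n in 2-7 yields 1 + n.)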
2501 if (hw->mac.type >= e1000_pch_lpt) {
2502 uint32_t locked, rar;
2504 locked = E1000_READ_REG(hw, E1000_FWSM) &
2505 E1000_FWSM_WLOCK_MAC_MASK;
2506 locked >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2507 rar = 1;
2508 if (locked == 0)
2509 rar += 11;
2510 else if (locked == 1)
2511 rar += 0;
2512 else
2513 rar += locked;
2514 Adapter->unicst_total = min(rar,
2515 MAX_NUM_UNICAST_ADDRESSES);
2518 /* Workaround for an erratum of the 82571 chipset */
2519 if ((hw->mac.type == e1000_82571) &&
2520 (e1000_get_laa_state_82571(hw) == B_TRUE))
2521 Adapter->unicst_total--;
2523 /* VMware doesn't support multiple mac addresses properly */
2524 if (hw->subsystem_vendor_id == 0x15ad)
2525 Adapter->unicst_total = 1;
2527 Adapter->unicst_avail = Adapter->unicst_total;
2529 for (slot = 0; slot < Adapter->unicst_total; slot++) {
2530 /* Clear both the flag and MAC address */
2531 Adapter->unicst_addr[slot].reg.high = 0;
2532 Adapter->unicst_addr[slot].reg.low = 0;
2534 } else {
2535 /* Workaround for an erratum of the 82571 chipset */
2536 if ((hw->mac.type == e1000_82571) &&
2537 (e1000_get_laa_state_82571(hw) == B_TRUE))
2538 (void) e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
2540 /* Re-configure the RAR registers */
2541 for (slot = 0; slot < Adapter->unicst_total; slot++)
2542 if (Adapter->unicst_addr[slot].mac.set == 1)
2543 (void) e1000_rar_set(hw,
2544 Adapter->unicst_addr[slot].mac.addr, slot);
2547 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2548 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
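/*
 * e1000g_unicst_set - program the receive address register (RAR) for
 * 'slot' with mac_addr, or clear the slot when mac_addr is NULL, applying
 * the 82542 rev 2.0 and 82571 LAA errata workarounds along the way.
 */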
2551 static int
2552 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr,
2553 int slot)
2555 struct e1000_hw *hw;
2557 hw = &Adapter->shared;
2560 * The first revision of Wiseman silicon (rev 2.0) has an erratum
2561 * that requires the receiver to be in reset when any of the
2562 * receive address registers (RAR regs) are accessed. The first
2563 * rev of Wiseman silicon also requires MWI to be disabled when
2564 * a global reset or a receive reset is issued. So before we
2565 * initialize the RARs, we check the rev of the Wiseman controller
2566 * and work around any necessary HW errata.
2568 if ((hw->mac.type == e1000_82542) &&
2569 (hw->revision_id == E1000_REVISION_2)) {
2570 e1000_pci_clear_mwi(hw);
2571 E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2572 msec_delay(5);
2574 if (mac_addr == NULL) {
2575 E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0);
2576 E1000_WRITE_FLUSH(hw);
2577 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0);
2578 E1000_WRITE_FLUSH(hw);
2579 /* Clear both the flag and MAC address */
2580 Adapter->unicst_addr[slot].reg.high = 0;
2581 Adapter->unicst_addr[slot].reg.low = 0;
2582 } else {
2583 bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr,
2584 ETHERADDRL);
2585 (void) e1000_rar_set(hw, (uint8_t *)mac_addr, slot);
2586 Adapter->unicst_addr[slot].mac.set = 1;
2589 /* Workaround for an erratum of the 82571 chipset */
2590 if (slot == 0) {
2591 if ((hw->mac.type == e1000_82571) &&
2592 (e1000_get_laa_state_82571(hw) == B_TRUE))
2593 if (mac_addr == NULL) {
2594 E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2595 slot << 1, 0);
2596 E1000_WRITE_FLUSH(hw);
2597 E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2598 (slot << 1) + 1, 0);
2599 E1000_WRITE_FLUSH(hw);
2600 } else {
2601 (void) e1000_rar_set(hw, (uint8_t *)mac_addr,
2602 LAST_RAR_ENTRY);
2607 * If we are using Wiseman rev 2.0 silicon, we will have previously
2608 * put the receiver in reset, and disabled MWI, to work around some
2609 * HW errata. Now we should take the receiver out of reset, and
2610 * re-enable MWI if it was previously enabled by the PCI BIOS.
2612 if ((hw->mac.type == e1000_82542) &&
2613 (hw->revision_id == E1000_REVISION_2)) {
2614 E1000_WRITE_REG(hw, E1000_RCTL, 0);
2615 msec_delay(1);
2616 if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2617 e1000_pci_set_mwi(hw);
2618 e1000g_rx_setup(Adapter);
2621 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2622 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2623 return (EIO);
2626 return (0);
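/*
 * multicst_add - append a multicast address to the software table, growing
 * the table in MCAST_ALLOC_SIZE increments as needed, then push the updated
 * list to the hardware via e1000_update_mc_addr_list().
 */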
2629 static int
2630 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr)
2632 struct e1000_hw *hw = &Adapter->shared;
2633 struct ether_addr *newtable;
2634 size_t new_len;
2635 size_t old_len;
2636 int res = 0;
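	/* A valid multicast address must have the group bit (lowest bit of the first octet) set */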
2638 if ((multiaddr[0] & 01) == 0) {
2639 res = EINVAL;
2640 e1000g_log(Adapter, CE_WARN, "Illegal multicast address");
2641 goto done;
2644 if (Adapter->mcast_count >= Adapter->mcast_max_num) {
2645 res = ENOENT;
2646 e1000g_log(Adapter, CE_WARN,
2647 "Adapter requested more than %d mcast addresses",
2648 Adapter->mcast_max_num);
2649 goto done;
2653 if (Adapter->mcast_count == Adapter->mcast_alloc_count) {
2654 old_len = Adapter->mcast_alloc_count *
2655 sizeof (struct ether_addr);
2656 new_len = (Adapter->mcast_alloc_count + MCAST_ALLOC_SIZE) *
2657 sizeof (struct ether_addr);
2659 newtable = kmem_alloc(new_len, KM_NOSLEEP);
2660 if (newtable == NULL) {
2661 res = ENOMEM;
2662 e1000g_log(Adapter, CE_WARN,
2663 "Not enough memory to alloc mcast table");
2664 goto done;
2667 if (Adapter->mcast_table != NULL) {
2668 bcopy(Adapter->mcast_table, newtable, old_len);
2669 kmem_free(Adapter->mcast_table, old_len);
2671 Adapter->mcast_alloc_count += MCAST_ALLOC_SIZE;
2672 Adapter->mcast_table = newtable;
2675 bcopy(multiaddr,
2676 &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL);
2677 Adapter->mcast_count++;
2680 * Update the MC table in the hardware
2682 e1000g_clear_interrupt(Adapter);
2684 e1000_update_mc_addr_list(hw,
2685 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
2687 e1000g_mask_interrupt(Adapter);
2689 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2690 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2691 res = EIO;
2694 done:
2695 return (res);
2698 static int
2699 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr)
2701 struct e1000_hw *hw = &Adapter->shared;
2702 struct ether_addr *newtable;
2703 size_t new_len;
2704 size_t old_len;
2705 unsigned i;
2707 for (i = 0; i < Adapter->mcast_count; i++) {
2708 if (bcmp(multiaddr, &Adapter->mcast_table[i],
2709 ETHERADDRL) == 0) {
2710 for (i++; i < Adapter->mcast_count; i++) {
2711 Adapter->mcast_table[i - 1] =
2712 Adapter->mcast_table[i];
2714 Adapter->mcast_count--;
2715 break;
2719 if ((Adapter->mcast_alloc_count - Adapter->mcast_count) >
2720 MCAST_ALLOC_SIZE) {
2721 old_len = Adapter->mcast_alloc_count *
2722 sizeof (struct ether_addr);
2723 new_len = (Adapter->mcast_alloc_count - MCAST_ALLOC_SIZE) *
2724 sizeof (struct ether_addr);
2726 newtable = kmem_alloc(new_len, KM_NOSLEEP);
2727 if (newtable != NULL) {
2728 bcopy(Adapter->mcast_table, newtable, new_len);
2729 kmem_free(Adapter->mcast_table, old_len);
2731 Adapter->mcast_alloc_count -= MCAST_ALLOC_SIZE;
2732 Adapter->mcast_table = newtable;
2737 * Update the MC table in the hardware
2739 e1000g_clear_interrupt(Adapter);
2741 e1000_update_mc_addr_list(hw,
2742 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
2744 e1000g_mask_interrupt(Adapter);
2746 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2747 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2748 return (EIO);
2751 return (0);
2754 static void
2755 e1000g_release_multicast(struct e1000g *Adapter)
2757 if (Adapter->mcast_table != NULL) {
2758 kmem_free(Adapter->mcast_table,
2759 Adapter->mcast_alloc_count * sizeof (struct ether_addr));
2760 Adapter->mcast_table = NULL;
2765 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
2767 struct e1000g *Adapter = (struct e1000g *)arg;
2768 int result;
2770 rw_enter(&Adapter->chip_lock, RW_WRITER);
2772 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2773 result = ECANCELED;
2774 goto done;
2777 result = (add) ? multicst_add(Adapter, addr)
2778 : multicst_remove(Adapter, addr);
2780 done:
2781 rw_exit(&Adapter->chip_lock);
2782 return (result);
2787 e1000g_m_promisc(void *arg, boolean_t on)
2789 struct e1000g *Adapter = (struct e1000g *)arg;
2790 uint32_t rctl;
2792 rw_enter(&Adapter->chip_lock, RW_WRITER);
2794 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2795 rw_exit(&Adapter->chip_lock);
2796 return (ECANCELED);
2799 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
2801 if (on)
2802 rctl |=
2803 (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
2804 else
2805 rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
2807 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
2809 Adapter->e1000g_promisc = on;
2811 rw_exit(&Adapter->chip_lock);
2813 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2814 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2815 return (EIO);
2818 return (0);
2822 * Entry points to enable and disable interrupts at the granularity of
2823 * a group.
2824 * Turning poll_mode for the whole adapter on or off enables or
2825 * overrides the ring-level polling control over the hardware interrupts.
2827 static int
2828 e1000g_rx_group_intr_enable(mac_intr_handle_t arg)
2830 struct e1000g *adapter = (struct e1000g *)arg;
2831 e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2834 * Later interrupts at the granularity of this ring will
2835 * invoke mac_rx() with NULL, indicating the need for another
2836 * software classification.
2837 * We have a single ring usable per adapter now, so we only need to
2838 * reset the rx handle for that one.
2839 * When more RX rings can be used, we should update each one of them.
2841 mutex_enter(&rx_ring->rx_lock);
2842 rx_ring->mrh = NULL;
2843 adapter->poll_mode = B_FALSE;
2844 mutex_exit(&rx_ring->rx_lock);
2845 return (0);
2848 static int
2849 e1000g_rx_group_intr_disable(mac_intr_handle_t arg)
2851 struct e1000g *adapter = (struct e1000g *)arg;
2852 e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2854 mutex_enter(&rx_ring->rx_lock);
2857 * Later interrupts at the granularity of this ring will
2858 * invoke mac_rx() with the handle for this ring;
2860 adapter->poll_mode = B_TRUE;
2861 rx_ring->mrh = rx_ring->mrh_init;
2862 mutex_exit(&rx_ring->rx_lock);
2863 return (0);
2867 * Entry points to enable and disable interrupts at the granularity of
2868 * a ring.
2869 * The adapter's poll_mode controls whether we actually proceed with hardware
2870 * interrupt toggling.
2872 static int
2873 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh)
2875 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh;
2876 struct e1000g *adapter = rx_ring->adapter;
2877 struct e1000_hw *hw = &adapter->shared;
2878 uint32_t intr_mask;
2880 rw_enter(&adapter->chip_lock, RW_READER);
2882 if (adapter->e1000g_state & E1000G_SUSPENDED) {
2883 rw_exit(&adapter->chip_lock);
2884 return (0);
2887 mutex_enter(&rx_ring->rx_lock);
2888 rx_ring->poll_flag = 0;
2889 mutex_exit(&rx_ring->rx_lock);
2891 /* Rx interrupt enabling for MSI and legacy */
2892 intr_mask = E1000_READ_REG(hw, E1000_IMS);
2893 intr_mask |= E1000_IMS_RXT0;
2894 E1000_WRITE_REG(hw, E1000_IMS, intr_mask);
2895 E1000_WRITE_FLUSH(hw);
2897 /* Trigger a Rx interrupt to check Rx ring */
2898 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
2899 E1000_WRITE_FLUSH(hw);
2901 rw_exit(&adapter->chip_lock);
2902 return (0);
2905 static int
2906 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh)
2908 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh;
2909 struct e1000g *adapter = rx_ring->adapter;
2910 struct e1000_hw *hw = &adapter->shared;
2912 rw_enter(&adapter->chip_lock, RW_READER);
2914 if (adapter->e1000g_state & E1000G_SUSPENDED) {
2915 rw_exit(&adapter->chip_lock);
2916 return (0);
2918 mutex_enter(&rx_ring->rx_lock);
2919 rx_ring->poll_flag = 1;
2920 mutex_exit(&rx_ring->rx_lock);
2922 /* Rx interrupt disabling for MSI and legacy */
2923 E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
2924 E1000_WRITE_FLUSH(hw);
2926 rw_exit(&adapter->chip_lock);
2927 return (0);
2931 * e1000g_unicst_find - Find the slot for the specified unicast address
2933 static int
2934 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr)
2936 int slot;
2938 for (slot = 0; slot < Adapter->unicst_total; slot++) {
2939 if ((Adapter->unicst_addr[slot].mac.set == 1) &&
2940 (bcmp(Adapter->unicst_addr[slot].mac.addr,
2941 mac_addr, ETHERADDRL) == 0))
2942 return (slot);
2945 return (-1);
2949 * Entry points to add a MAC address to, and remove one from, a ring group.
2950 * The caller takes care of adding and removing the MAC addresses
2951 * to/from the filter via these two routines.
2954 static int
2955 e1000g_addmac(void *arg, const uint8_t *mac_addr)
2957 struct e1000g *Adapter = (struct e1000g *)arg;
2958 int slot, err;
2960 rw_enter(&Adapter->chip_lock, RW_WRITER);
2962 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2963 rw_exit(&Adapter->chip_lock);
2964 return (ECANCELED);
2967 if (e1000g_unicst_find(Adapter, mac_addr) != -1) {
2968 /* The same address is already in slot */
2969 rw_exit(&Adapter->chip_lock);
2970 return (0);
2973 if (Adapter->unicst_avail == 0) {
2974 /* no slots available */
2975 rw_exit(&Adapter->chip_lock);
2976 return (ENOSPC);
2979 /* Search for a free slot */
2980 for (slot = 0; slot < Adapter->unicst_total; slot++) {
2981 if (Adapter->unicst_addr[slot].mac.set == 0)
2982 break;
2984 ASSERT(slot < Adapter->unicst_total);
2986 err = e1000g_unicst_set(Adapter, mac_addr, slot);
2987 if (err == 0)
2988 Adapter->unicst_avail--;
2990 rw_exit(&Adapter->chip_lock);
2992 return (err);
2995 static int
2996 e1000g_remmac(void *arg, const uint8_t *mac_addr)
2998 struct e1000g *Adapter = (struct e1000g *)arg;
2999 int slot, err;
3001 rw_enter(&Adapter->chip_lock, RW_WRITER);
3003 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3004 rw_exit(&Adapter->chip_lock);
3005 return (ECANCELED);
3008 slot = e1000g_unicst_find(Adapter, mac_addr);
3009 if (slot == -1) {
3010 rw_exit(&Adapter->chip_lock);
3011 return (EINVAL);
3014 ASSERT(Adapter->unicst_addr[slot].mac.set);
3016 /* Clear this slot */
3017 err = e1000g_unicst_set(Adapter, NULL, slot);
3018 if (err == 0)
3019 Adapter->unicst_avail++;
3021 rw_exit(&Adapter->chip_lock);
3023 return (err);
3026 static int
3027 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
3029 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh;
3031 mutex_enter(&rx_ring->rx_lock);
3032 rx_ring->ring_gen_num = mr_gen_num;
3033 mutex_exit(&rx_ring->rx_lock);
3034 return (0);
3038 * Callback function for the MAC layer to register all rings.
3040 * The hardware supports a single group with currently only one ring
3041 * available.
3042 * Though not offering virtualization ability per se, exposing the
3043 * group/ring still enables the polling and interrupt toggling.
3045 /* ARGSUSED */
3046 void
3047 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index,
3048 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
3050 struct e1000g *Adapter = (struct e1000g *)arg;
3051 e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
3052 mac_intr_t *mintr;
3055 * We advertised only RX group/rings, so the MAC framework shouldn't
3056 * ask for anything else.
3058 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0);
3060 rx_ring->mrh = rx_ring->mrh_init = rh;
3061 infop->mri_driver = (mac_ring_driver_t)rx_ring;
3062 infop->mri_start = e1000g_ring_start;
3063 infop->mri_stop = NULL;
3064 infop->mri_poll = e1000g_poll_ring;
3065 infop->mri_stat = e1000g_rx_ring_stat;
3067 /* Ring level interrupts */
3068 mintr = &infop->mri_intr;
3069 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
3070 mintr->mi_enable = e1000g_rx_ring_intr_enable;
3071 mintr->mi_disable = e1000g_rx_ring_intr_disable;
3072 if (Adapter->msi_enable)
3073 mintr->mi_ddi_handle = Adapter->htable[0];
3076 /* ARGSUSED */
3077 static void
3078 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index,
3079 mac_group_info_t *infop, mac_group_handle_t gh)
3081 struct e1000g *Adapter = (struct e1000g *)arg;
3082 mac_intr_t *mintr;
3085 * We advertised a single RX ring. Getting a request for anything else
3086 * signifies a bug in the MAC framework.
3088 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0);
3090 Adapter->rx_group = gh;
3092 infop->mgi_driver = (mac_group_driver_t)Adapter;
3093 infop->mgi_start = NULL;
3094 infop->mgi_stop = NULL;
3095 infop->mgi_addmac = e1000g_addmac;
3096 infop->mgi_remmac = e1000g_remmac;
3097 infop->mgi_count = 1;
3099 /* Group level interrupts */
3100 mintr = &infop->mgi_intr;
3101 mintr->mi_handle = (mac_intr_handle_t)Adapter;
3102 mintr->mi_enable = e1000g_rx_group_intr_enable;
3103 mintr->mi_disable = e1000g_rx_group_intr_disable;
3106 static void
3107 e1000g_led_blink(void *arg)
3109 e1000g_t *e1000g = arg;
3111 mutex_enter(&e1000g->e1000g_led_lock);
3112 VERIFY(e1000g->e1000g_emul_blink);
3113 if (e1000g->e1000g_emul_state) {
3114 (void) e1000_led_on(&e1000g->shared);
3115 } else {
3116 (void) e1000_led_off(&e1000g->shared);
3118 e1000g->e1000g_emul_state = !e1000g->e1000g_emul_state;
3119 mutex_exit(&e1000g->e1000g_led_lock);
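/*
 * e1000g_led_set - MAC_CAPAB_LED entry point.  MAC_LED_IDENT blinks the
 * LED, using the hardware blink circuit when present; otherwise (when
 * e1000g_emul_blink is set) a ddi_periodic fires e1000g_led_blink() every
 * 200 ms to toggle the LED in software.
 */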
3122 static int
3123 e1000g_led_set(void *arg, mac_led_mode_t mode, uint_t flags)
3125 e1000g_t *e1000g = arg;
3127 if (flags != 0)
3128 return (EINVAL);
3130 if (mode != MAC_LED_DEFAULT &&
3131 mode != MAC_LED_IDENT &&
3132 mode != MAC_LED_OFF &&
3133 mode != MAC_LED_ON)
3134 return (ENOTSUP);
3136 mutex_enter(&e1000g->e1000g_led_lock);
3138 if ((mode == MAC_LED_IDENT || mode == MAC_LED_OFF ||
3139 mode == MAC_LED_ON) &&
3140 !e1000g->e1000g_led_setup) {
3141 if (e1000_setup_led(&e1000g->shared) != E1000_SUCCESS) {
3142 mutex_exit(&e1000g->e1000g_led_lock);
3143 return (EIO);
3146 e1000g->e1000g_led_setup = B_TRUE;
3149 if (mode != MAC_LED_IDENT && e1000g->e1000g_blink != NULL) {
3150 ddi_periodic_t id = e1000g->e1000g_blink;
3151 e1000g->e1000g_blink = NULL;
3152 mutex_exit(&e1000g->e1000g_led_lock);
3153 ddi_periodic_delete(id);
3154 mutex_enter(&e1000g->e1000g_led_lock);
3157 switch (mode) {
3158 case MAC_LED_DEFAULT:
3159 if (e1000g->e1000g_led_setup) {
3160 if (e1000_cleanup_led(&e1000g->shared) !=
3161 E1000_SUCCESS) {
3162 mutex_exit(&e1000g->e1000g_led_lock);
3163 return (EIO);
3165 e1000g->e1000g_led_setup = B_FALSE;
3167 break;
3168 case MAC_LED_IDENT:
3169 if (e1000g->e1000g_emul_blink) {
3170 if (e1000g->e1000g_blink != NULL)
3171 break;
3174 * Note, we use a 200 ms period here because that is the rate
3175 * section 10.1.3 of the 8254x Intel Manual (PCI/PCI-X Family
3176 * of Gigabit Ethernet Controllers Software Developer's
3177 * Manual) indicates the optional blink hardware
3178 * operates at.
3180 e1000g->e1000g_blink =
3181 ddi_periodic_add(e1000g_led_blink, e1000g,
3182 200ULL * (NANOSEC / MILLISEC), DDI_IPL_0);
3183 } else if (e1000_blink_led(&e1000g->shared) != E1000_SUCCESS) {
3184 mutex_exit(&e1000g->e1000g_led_lock);
3185 return (EIO);
3187 break;
3188 case MAC_LED_OFF:
3189 if (e1000_led_off(&e1000g->shared) != E1000_SUCCESS) {
3190 mutex_exit(&e1000g->e1000g_led_lock);
3191 return (EIO);
3193 break;
3194 case MAC_LED_ON:
3195 if (e1000_led_on(&e1000g->shared) != E1000_SUCCESS) {
3196 mutex_exit(&e1000g->e1000g_led_lock);
3197 return (EIO);
3199 break;
3200 default:
3201 mutex_exit(&e1000g->e1000g_led_lock);
3202 return (ENOTSUP);
3205 mutex_exit(&e1000g->e1000g_led_lock);
3206 return (0);
3210 static boolean_t
3211 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3213 struct e1000g *Adapter = (struct e1000g *)arg;
3215 switch (cap) {
3216 case MAC_CAPAB_HCKSUM: {
3217 uint32_t *txflags = cap_data;
3219 if (Adapter->tx_hcksum_enable)
3220 *txflags = HCKSUM_IPHDRCKSUM |
3221 HCKSUM_INET_PARTIAL;
3222 else
3223 return (B_FALSE);
3224 break;
3227 case MAC_CAPAB_LSO: {
3228 mac_capab_lso_t *cap_lso = cap_data;
3230 if (Adapter->lso_enable) {
3231 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
3232 cap_lso->lso_basic_tcp_ipv4.lso_max =
3233 E1000_LSO_MAXLEN;
3234 } else
3235 return (B_FALSE);
3236 break;
3238 case MAC_CAPAB_RINGS: {
3239 mac_capab_rings_t *cap_rings = cap_data;
3241 /* No TX rings exposed yet */
3242 if (cap_rings->mr_type != MAC_RING_TYPE_RX)
3243 return (B_FALSE);
3245 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3246 cap_rings->mr_rnum = 1;
3247 cap_rings->mr_gnum = 1;
3248 cap_rings->mr_rget = e1000g_fill_ring;
3249 cap_rings->mr_gget = e1000g_fill_group;
3250 break;
3252 case MAC_CAPAB_LED: {
3253 mac_capab_led_t *cap_led = cap_data;
3255 cap_led->mcl_flags = 0;
3256 cap_led->mcl_modes = MAC_LED_DEFAULT;
3257 if (Adapter->shared.mac.ops.blink_led != NULL &&
3258 Adapter->shared.mac.ops.blink_led !=
3259 e1000_null_ops_generic) {
3260 cap_led->mcl_modes |= MAC_LED_IDENT;
3263 if (Adapter->shared.mac.ops.led_off != NULL &&
3264 Adapter->shared.mac.ops.led_off !=
3265 e1000_null_ops_generic) {
3266 cap_led->mcl_modes |= MAC_LED_OFF;
3269 if (Adapter->shared.mac.ops.led_on != NULL &&
3270 Adapter->shared.mac.ops.led_on !=
3271 e1000_null_ops_generic) {
3272 cap_led->mcl_modes |= MAC_LED_ON;
3276 * Some hardware doesn't support blinking natively because it's
3277 * missing the optional blink circuit. If it supports both off and
3278 * on, then we'll emulate blinking ourselves.
3280 if (((cap_led->mcl_modes & MAC_LED_IDENT) == 0) &&
3281 ((cap_led->mcl_modes & MAC_LED_OFF) != 0) &&
3282 ((cap_led->mcl_modes & MAC_LED_ON) != 0)) {
3283 cap_led->mcl_modes |= MAC_LED_IDENT;
3284 Adapter->e1000g_emul_blink = B_TRUE;
3287 cap_led->mcl_set = e1000g_led_set;
3288 break;
3290 default:
3291 return (B_FALSE);
3293 return (B_TRUE);
3296 static boolean_t
3297 e1000g_param_locked(mac_prop_id_t pr_num)
3300 * All en_* parameters are locked (read-only) while
3301 * the device is in any sort of loopback mode ...
3303 switch (pr_num) {
3304 case MAC_PROP_EN_1000FDX_CAP:
3305 case MAC_PROP_EN_1000HDX_CAP:
3306 case MAC_PROP_EN_100FDX_CAP:
3307 case MAC_PROP_EN_100HDX_CAP:
3308 case MAC_PROP_EN_10FDX_CAP:
3309 case MAC_PROP_EN_10HDX_CAP:
3310 case MAC_PROP_AUTONEG:
3311 case MAC_PROP_FLOWCTRL:
3312 return (B_TRUE);
3314 return (B_FALSE);
3318 * callback function for set/get of properties
3320 static int
3321 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3322 uint_t pr_valsize, const void *pr_val)
3324 struct e1000g *Adapter = arg;
3325 struct e1000_hw *hw = &Adapter->shared;
3326 struct e1000_fc_info *fc = &Adapter->shared.fc;
3327 int err = 0;
3328 link_flowctrl_t flowctrl;
3329 uint32_t cur_mtu, new_mtu;
3331 rw_enter(&Adapter->chip_lock, RW_WRITER);
3333 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3334 rw_exit(&Adapter->chip_lock);
3335 return (ECANCELED);
3338 if (Adapter->loopback_mode != E1000G_LB_NONE &&
3339 e1000g_param_locked(pr_num)) {
3341 * All en_* parameters are locked (read-only)
3342 * while the device is in any sort of loopback mode.
3344 rw_exit(&Adapter->chip_lock);
3345 return (EBUSY);
3348 switch (pr_num) {
3349 case MAC_PROP_EN_1000FDX_CAP:
3350 if (hw->phy.media_type != e1000_media_type_copper) {
3351 err = ENOTSUP;
3352 break;
3354 Adapter->param_en_1000fdx = *(uint8_t *)pr_val;
3355 Adapter->param_adv_1000fdx = *(uint8_t *)pr_val;
3356 goto reset;
3357 case MAC_PROP_EN_100FDX_CAP:
3358 if (hw->phy.media_type != e1000_media_type_copper) {
3359 err = ENOTSUP;
3360 break;
3362 Adapter->param_en_100fdx = *(uint8_t *)pr_val;
3363 Adapter->param_adv_100fdx = *(uint8_t *)pr_val;
3364 goto reset;
3365 case MAC_PROP_EN_100HDX_CAP:
3366 if (hw->phy.media_type != e1000_media_type_copper) {
3367 err = ENOTSUP;
3368 break;
3370 Adapter->param_en_100hdx = *(uint8_t *)pr_val;
3371 Adapter->param_adv_100hdx = *(uint8_t *)pr_val;
3372 goto reset;
3373 case MAC_PROP_EN_10FDX_CAP:
3374 if (hw->phy.media_type != e1000_media_type_copper) {
3375 err = ENOTSUP;
3376 break;
3378 Adapter->param_en_10fdx = *(uint8_t *)pr_val;
3379 Adapter->param_adv_10fdx = *(uint8_t *)pr_val;
3380 goto reset;
3381 case MAC_PROP_EN_10HDX_CAP:
3382 if (hw->phy.media_type != e1000_media_type_copper) {
3383 err = ENOTSUP;
3384 break;
3386 Adapter->param_en_10hdx = *(uint8_t *)pr_val;
3387 Adapter->param_adv_10hdx = *(uint8_t *)pr_val;
3388 goto reset;
3389 case MAC_PROP_AUTONEG:
3390 if (hw->phy.media_type != e1000_media_type_copper) {
3391 err = ENOTSUP;
3392 break;
3394 Adapter->param_adv_autoneg = *(uint8_t *)pr_val;
3395 goto reset;
3396 case MAC_PROP_FLOWCTRL:
3397 fc->send_xon = B_TRUE;
3398 bcopy(pr_val, &flowctrl, sizeof (flowctrl));
3400 switch (flowctrl) {
3401 default:
3402 err = EINVAL;
3403 break;
3404 case LINK_FLOWCTRL_NONE:
3405 fc->requested_mode = e1000_fc_none;
3406 break;
3407 case LINK_FLOWCTRL_RX:
3408 fc->requested_mode = e1000_fc_rx_pause;
3409 break;
3410 case LINK_FLOWCTRL_TX:
3411 fc->requested_mode = e1000_fc_tx_pause;
3412 break;
3413 case LINK_FLOWCTRL_BI:
3414 fc->requested_mode = e1000_fc_full;
3415 break;
3417 reset:
3418 if (err == 0) {
3419 /* check PCH limits & reset the link */
3420 e1000g_pch_limits(Adapter);
3421 if (e1000g_reset_link(Adapter) != DDI_SUCCESS)
3422 err = EINVAL;
3424 break;
3425 case MAC_PROP_ADV_1000FDX_CAP:
3426 case MAC_PROP_ADV_1000HDX_CAP:
3427 case MAC_PROP_ADV_100FDX_CAP:
3428 case MAC_PROP_ADV_100HDX_CAP:
3429 case MAC_PROP_ADV_10FDX_CAP:
3430 case MAC_PROP_ADV_10HDX_CAP:
3431 case MAC_PROP_EN_1000HDX_CAP:
3432 case MAC_PROP_STATUS:
3433 case MAC_PROP_SPEED:
3434 case MAC_PROP_DUPLEX:
3435 err = ENOTSUP; /* read-only prop. Can't set this. */
3436 break;
3437 case MAC_PROP_MTU:
3438 /* adapter must be stopped for an MTU change */
3439 if (Adapter->e1000g_state & E1000G_STARTED) {
3440 err = EBUSY;
3441 break;
3444 cur_mtu = Adapter->default_mtu;
3446 /* get new requested MTU */
3447 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3448 if (new_mtu == cur_mtu) {
3449 err = 0;
3450 break;
3453 if ((new_mtu < DEFAULT_MTU) ||
3454 (new_mtu > Adapter->max_mtu)) {
3455 err = EINVAL;
3456 break;
3459 /* inform MAC framework of new MTU */
3460 err = mac_maxsdu_update(Adapter->mh, new_mtu);
3462 if (err == 0) {
3463 Adapter->default_mtu = new_mtu;
3464 Adapter->max_frame_size =
3465 e1000g_mtu2maxframe(new_mtu);
3468 * check PCH limits & set buffer sizes to
3469 * match new MTU
3471 e1000g_pch_limits(Adapter);
3472 e1000g_set_bufsize(Adapter);
3475 * decrease the number of descriptors and free
3476 * packets for jumbo frames to reduce tx/rx
3477 * resource consumption
3479 if (Adapter->max_frame_size >=
3480 (FRAME_SIZE_UPTO_4K)) {
3481 if (Adapter->tx_desc_num_flag == 0)
3482 Adapter->tx_desc_num =
3483 DEFAULT_JUMBO_NUM_TX_DESC;
3485 if (Adapter->rx_desc_num_flag == 0)
3486 Adapter->rx_desc_num =
3487 DEFAULT_JUMBO_NUM_RX_DESC;
3489 if (Adapter->tx_buf_num_flag == 0)
3490 Adapter->tx_freelist_num =
3491 DEFAULT_JUMBO_NUM_TX_BUF;
3493 if (Adapter->rx_buf_num_flag == 0)
3494 Adapter->rx_freelist_limit =
3495 DEFAULT_JUMBO_NUM_RX_BUF;
3496 } else {
3497 if (Adapter->tx_desc_num_flag == 0)
3498 Adapter->tx_desc_num =
3499 DEFAULT_NUM_TX_DESCRIPTOR;
3501 if (Adapter->rx_desc_num_flag == 0)
3502 Adapter->rx_desc_num =
3503 DEFAULT_NUM_RX_DESCRIPTOR;
3505 if (Adapter->tx_buf_num_flag == 0)
3506 Adapter->tx_freelist_num =
3507 DEFAULT_NUM_TX_FREELIST;
3509 if (Adapter->rx_buf_num_flag == 0)
3510 Adapter->rx_freelist_limit =
3511 DEFAULT_NUM_RX_FREELIST;
3514 break;
3515 case MAC_PROP_PRIVATE:
3516 err = e1000g_set_priv_prop(Adapter, pr_name,
3517 pr_valsize, pr_val);
3518 break;
3519 default:
3520 err = ENOTSUP;
3521 break;
3523 rw_exit(&Adapter->chip_lock);
3524 return (err);
3527 static int
3528 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3529 uint_t pr_valsize, void *pr_val)
3531 struct e1000g *Adapter = arg;
3532 struct e1000_fc_info *fc = &Adapter->shared.fc;
3533 int err = 0;
3534 link_flowctrl_t flowctrl;
3535 uint64_t tmp = 0;
3537 switch (pr_num) {
3538 case MAC_PROP_DUPLEX:
3539 ASSERT(pr_valsize >= sizeof (link_duplex_t));
3540 bcopy(&Adapter->link_duplex, pr_val,
3541 sizeof (link_duplex_t));
3542 break;
3543 case MAC_PROP_SPEED:
3544 ASSERT(pr_valsize >= sizeof (uint64_t));
3545 tmp = Adapter->link_speed * 1000000ull;
3546 bcopy(&tmp, pr_val, sizeof (tmp));
3547 break;
3548 case MAC_PROP_AUTONEG:
3549 *(uint8_t *)pr_val = Adapter->param_adv_autoneg;
3550 break;
3551 case MAC_PROP_FLOWCTRL:
3552 ASSERT(pr_valsize >= sizeof (link_flowctrl_t));
3553 switch (fc->current_mode) {
3554 case e1000_fc_none:
3555 flowctrl = LINK_FLOWCTRL_NONE;
3556 break;
3557 case e1000_fc_rx_pause:
3558 flowctrl = LINK_FLOWCTRL_RX;
3559 break;
3560 case e1000_fc_tx_pause:
3561 flowctrl = LINK_FLOWCTRL_TX;
3562 break;
3563 case e1000_fc_full:
3564 flowctrl = LINK_FLOWCTRL_BI;
3565 break;
3567 bcopy(&flowctrl, pr_val, sizeof (flowctrl));
3568 break;
3569 case MAC_PROP_ADV_1000FDX_CAP:
3570 *(uint8_t *)pr_val = Adapter->param_adv_1000fdx;
3571 break;
3572 case MAC_PROP_EN_1000FDX_CAP:
3573 *(uint8_t *)pr_val = Adapter->param_en_1000fdx;
3574 break;
3575 case MAC_PROP_ADV_1000HDX_CAP:
3576 *(uint8_t *)pr_val = Adapter->param_adv_1000hdx;
3577 break;
3578 case MAC_PROP_EN_1000HDX_CAP:
3579 *(uint8_t *)pr_val = Adapter->param_en_1000hdx;
3580 break;
3581 case MAC_PROP_ADV_100FDX_CAP:
3582 *(uint8_t *)pr_val = Adapter->param_adv_100fdx;
3583 break;
3584 case MAC_PROP_EN_100FDX_CAP:
3585 *(uint8_t *)pr_val = Adapter->param_en_100fdx;
3586 break;
3587 case MAC_PROP_ADV_100HDX_CAP:
3588 *(uint8_t *)pr_val = Adapter->param_adv_100hdx;
3589 break;
3590 case MAC_PROP_EN_100HDX_CAP:
3591 *(uint8_t *)pr_val = Adapter->param_en_100hdx;
3592 break;
3593 case MAC_PROP_ADV_10FDX_CAP:
3594 *(uint8_t *)pr_val = Adapter->param_adv_10fdx;
3595 break;
3596 case MAC_PROP_EN_10FDX_CAP:
3597 *(uint8_t *)pr_val = Adapter->param_en_10fdx;
3598 break;
3599 case MAC_PROP_ADV_10HDX_CAP:
3600 *(uint8_t *)pr_val = Adapter->param_adv_10hdx;
3601 break;
3602 case MAC_PROP_EN_10HDX_CAP:
3603 *(uint8_t *)pr_val = Adapter->param_en_10hdx;
3604 break;
3605 case MAC_PROP_ADV_100T4_CAP:
3606 case MAC_PROP_EN_100T4_CAP:
3607 *(uint8_t *)pr_val = Adapter->param_adv_100t4;
3608 break;
3609 case MAC_PROP_PRIVATE:
3610 err = e1000g_get_priv_prop(Adapter, pr_name,
3611 pr_valsize, pr_val);
3612 break;
3613 default:
3614 err = ENOTSUP;
3615 break;
3618 return (err);
3621 static void
3622 e1000g_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3623 mac_prop_info_handle_t prh)
3625 struct e1000g *Adapter = arg;
3626 struct e1000_hw *hw = &Adapter->shared;
3628 switch (pr_num) {
3629 case MAC_PROP_DUPLEX:
3630 case MAC_PROP_SPEED:
3631 case MAC_PROP_ADV_1000FDX_CAP:
3632 case MAC_PROP_ADV_1000HDX_CAP:
3633 case MAC_PROP_ADV_100FDX_CAP:
3634 case MAC_PROP_ADV_100HDX_CAP:
3635 case MAC_PROP_ADV_10FDX_CAP:
3636 case MAC_PROP_ADV_10HDX_CAP:
3637 case MAC_PROP_ADV_100T4_CAP:
3638 case MAC_PROP_EN_100T4_CAP:
3639 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3640 break;
3642 case MAC_PROP_EN_1000FDX_CAP:
3643 if (hw->phy.media_type != e1000_media_type_copper) {
3644 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3645 } else {
3646 mac_prop_info_set_default_uint8(prh,
3647 ((Adapter->phy_ext_status &
3648 IEEE_ESR_1000T_FD_CAPS) ||
3649 (Adapter->phy_ext_status &
3650 IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0);
3652 break;
3654 case MAC_PROP_EN_100FDX_CAP:
3655 if (hw->phy.media_type != e1000_media_type_copper) {
3656 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3657 } else {
3658 mac_prop_info_set_default_uint8(prh,
3659 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
3660 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
3661 ? 1 : 0);
3663 break;
3665 case MAC_PROP_EN_100HDX_CAP:
3666 if (hw->phy.media_type != e1000_media_type_copper) {
3667 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3668 } else {
3669 mac_prop_info_set_default_uint8(prh,
3670 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
3671 (Adapter->phy_status & MII_SR_100T2_HD_CAPS))
3672 ? 1 : 0);
3674 break;
3676 case MAC_PROP_EN_10FDX_CAP:
3677 if (hw->phy.media_type != e1000_media_type_copper) {
3678 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3679 } else {
3680 mac_prop_info_set_default_uint8(prh,
3681 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0);
3683 break;
3685 case MAC_PROP_EN_10HDX_CAP:
3686 if (hw->phy.media_type != e1000_media_type_copper) {
3687 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3688 } else {
3689 mac_prop_info_set_default_uint8(prh,
3690 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0);
3692 break;
3694 case MAC_PROP_EN_1000HDX_CAP:
3695 if (hw->phy.media_type != e1000_media_type_copper)
3696 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3697 break;
3699 case MAC_PROP_AUTONEG:
3700 if (hw->phy.media_type != e1000_media_type_copper) {
3701 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3702 } else {
3703 mac_prop_info_set_default_uint8(prh,
3704 (Adapter->phy_status & MII_SR_AUTONEG_CAPS)
3705 ? 1 : 0);
3707 break;
3709 case MAC_PROP_FLOWCTRL:
3710 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI);
3711 break;
3713 case MAC_PROP_MTU: {
3714 struct e1000_mac_info *mac = &Adapter->shared.mac;
3715 struct e1000_phy_info *phy = &Adapter->shared.phy;
3716 uint32_t max;
3718 /* some MAC types do not support jumbo frames */
3719 if ((mac->type == e1000_ich8lan) ||
3720 ((mac->type == e1000_ich9lan) && (phy->type ==
3721 e1000_phy_ife))) {
3722 max = DEFAULT_MTU;
3723 } else {
3724 max = Adapter->max_mtu;
3727 mac_prop_info_set_range_uint32(prh, DEFAULT_MTU, max);
3728 break;
3730 case MAC_PROP_PRIVATE: {
3731 char valstr[64];
3732 int value;
3734 if (strcmp(pr_name, "_adv_pause_cap") == 0 ||
3735 strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
3736 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3737 return;
3738 } else if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3739 value = DEFAULT_TX_BCOPY_THRESHOLD;
3740 } else if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3741 value = DEFAULT_TX_INTR_ENABLE;
3742 } else if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3743 value = DEFAULT_TX_INTR_DELAY;
3744 } else if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3745 value = DEFAULT_TX_INTR_ABS_DELAY;
3746 } else if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3747 value = DEFAULT_RX_BCOPY_THRESHOLD;
3748 } else if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3749 value = DEFAULT_RX_LIMIT_ON_INTR;
3750 } else if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3751 value = DEFAULT_RX_INTR_DELAY;
3752 } else if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3753 value = DEFAULT_RX_INTR_ABS_DELAY;
3754 } else if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3755 value = DEFAULT_INTR_THROTTLING;
3756 } else if (strcmp(pr_name, "_intr_adaptive") == 0) {
3757 value = 1;
3758 } else {
3759 return;
3762 (void) snprintf(valstr, sizeof (valstr), "%d", value);
3763 mac_prop_info_set_default_str(prh, valstr);
3764 break;
3769 /* ARGSUSED2 */
3770 static int
3771 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name,
3772 uint_t pr_valsize, const void *pr_val)
3774 int err = 0;
3775 long result;
3776 struct e1000_hw *hw = &Adapter->shared;
3778 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3779 if (pr_val == NULL) {
3780 err = EINVAL;
3781 return (err);
3783 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3784 if (result < MIN_TX_BCOPY_THRESHOLD ||
3785 result > MAX_TX_BCOPY_THRESHOLD)
3786 err = EINVAL;
3787 else {
3788 Adapter->tx_bcopy_thresh = (uint32_t)result;
3790 return (err);
3792 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3793 if (pr_val == NULL) {
3794 err = EINVAL;
3795 return (err);
3797 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3798 if (result < 0 || result > 1)
3799 err = EINVAL;
3800 else {
3801 Adapter->tx_intr_enable = (result == 1) ?
3802 B_TRUE: B_FALSE;
3803 if (Adapter->tx_intr_enable)
3804 e1000g_mask_tx_interrupt(Adapter);
3805 else
3806 e1000g_clear_tx_interrupt(Adapter);
3807 if (e1000g_check_acc_handle(
3808 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3809 ddi_fm_service_impact(Adapter->dip,
3810 DDI_SERVICE_DEGRADED);
3811 err = EIO;
3814 return (err);
3816 if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3817 if (pr_val == NULL) {
3818 err = EINVAL;
3819 return (err);
3821 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3822 if (result < MIN_TX_INTR_DELAY ||
3823 result > MAX_TX_INTR_DELAY)
3824 err = EINVAL;
3825 else {
3826 Adapter->tx_intr_delay = (uint32_t)result;
3827 E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay);
3828 if (e1000g_check_acc_handle(
3829 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3830 ddi_fm_service_impact(Adapter->dip,
3831 DDI_SERVICE_DEGRADED);
3832 err = EIO;
3835 return (err);
3837 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3838 if (pr_val == NULL) {
3839 err = EINVAL;
3840 return (err);
3842 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3843 if (result < MIN_TX_INTR_ABS_DELAY ||
3844 result > MAX_TX_INTR_ABS_DELAY)
3845 err = EINVAL;
3846 else {
3847 Adapter->tx_intr_abs_delay = (uint32_t)result;
3848 E1000_WRITE_REG(hw, E1000_TADV,
3849 Adapter->tx_intr_abs_delay);
3850 if (e1000g_check_acc_handle(
3851 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3852 ddi_fm_service_impact(Adapter->dip,
3853 DDI_SERVICE_DEGRADED);
3854 err = EIO;
3857 return (err);
3859 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3860 if (pr_val == NULL) {
3861 err = EINVAL;
3862 return (err);
3864 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3865 if (result < MIN_RX_BCOPY_THRESHOLD ||
3866 result > MAX_RX_BCOPY_THRESHOLD)
3867 err = EINVAL;
3868 else
3869 Adapter->rx_bcopy_thresh = (uint32_t)result;
3870 return (err);
3872 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3873 if (pr_val == NULL) {
3874 err = EINVAL;
3875 return (err);
3877 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3878 if (result < MIN_RX_LIMIT_ON_INTR ||
3879 result > MAX_RX_LIMIT_ON_INTR)
3880 err = EINVAL;
3881 else
3882 Adapter->rx_limit_onintr = (uint32_t)result;
3883 return (err);
3885 if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3886 if (pr_val == NULL) {
3887 err = EINVAL;
3888 return (err);
3890 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3891 if (result < MIN_RX_INTR_DELAY ||
3892 result > MAX_RX_INTR_DELAY)
3893 err = EINVAL;
3894 else {
3895 Adapter->rx_intr_delay = (uint32_t)result;
3896 E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay);
3897 if (e1000g_check_acc_handle(
3898 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3899 ddi_fm_service_impact(Adapter->dip,
3900 DDI_SERVICE_DEGRADED);
3901 err = EIO;
3904 return (err);
3906 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3907 if (pr_val == NULL) {
3908 err = EINVAL;
3909 return (err);
3911 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3912 if (result < MIN_RX_INTR_ABS_DELAY ||
3913 result > MAX_RX_INTR_ABS_DELAY)
3914 err = EINVAL;
3915 else {
3916 Adapter->rx_intr_abs_delay = (uint32_t)result;
3917 E1000_WRITE_REG(hw, E1000_RADV,
3918 Adapter->rx_intr_abs_delay);
3919 if (e1000g_check_acc_handle(
3920 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3921 ddi_fm_service_impact(Adapter->dip,
3922 DDI_SERVICE_DEGRADED);
3923 err = EIO;
3926 return (err);
3928 if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3929 if (pr_val == NULL) {
3930 err = EINVAL;
3931 return (err);
3933 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3934 if (result < MIN_INTR_THROTTLING ||
3935 result > MAX_INTR_THROTTLING)
3936 err = EINVAL;
3937 else {
3938 if (hw->mac.type >= e1000_82540) {
3939 Adapter->intr_throttling_rate =
3940 (uint32_t)result;
3941 E1000_WRITE_REG(hw, E1000_ITR,
3942 Adapter->intr_throttling_rate);
3943 if (e1000g_check_acc_handle(
3944 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3945 ddi_fm_service_impact(Adapter->dip,
3946 DDI_SERVICE_DEGRADED);
3947 err = EIO;
3949 } else
3950 err = EINVAL;
3952 return (err);
3954 if (strcmp(pr_name, "_intr_adaptive") == 0) {
3955 if (pr_val == NULL) {
3956 err = EINVAL;
3957 return (err);
3959 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3960 if (result < 0 || result > 1)
3961 err = EINVAL;
3962 else {
3963 if (hw->mac.type >= e1000_82540) {
3964 Adapter->intr_adaptive = (result == 1) ?
3965 B_TRUE : B_FALSE;
3966 } else {
3967 err = EINVAL;
3970 return (err);
3972 return (ENOTSUP);
3975 static int
3976 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name,
3977 uint_t pr_valsize, void *pr_val)
3979 int err = ENOTSUP;
3980 int value;
3982 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
3983 value = Adapter->param_adv_pause;
3984 err = 0;
3985 goto done;
3987 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
3988 value = Adapter->param_adv_asym_pause;
3989 err = 0;
3990 goto done;
3992 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3993 value = Adapter->tx_bcopy_thresh;
3994 err = 0;
3995 goto done;
3997 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3998 value = Adapter->tx_intr_enable;
3999 err = 0;
4000 goto done;
4002 if (strcmp(pr_name, "_tx_intr_delay") == 0) {
4003 value = Adapter->tx_intr_delay;
4004 err = 0;
4005 goto done;
4007 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
4008 value = Adapter->tx_intr_abs_delay;
4009 err = 0;
4010 goto done;
4012 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
4013 value = Adapter->rx_bcopy_thresh;
4014 err = 0;
4015 goto done;
4017 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
4018 value = Adapter->rx_limit_onintr;
4019 err = 0;
4020 goto done;
4022 if (strcmp(pr_name, "_rx_intr_delay") == 0) {
4023 value = Adapter->rx_intr_delay;
4024 err = 0;
4025 goto done;
4027 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
4028 value = Adapter->rx_intr_abs_delay;
4029 err = 0;
4030 goto done;
4032 if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
4033 value = Adapter->intr_throttling_rate;
4034 err = 0;
4035 goto done;
4037 if (strcmp(pr_name, "_intr_adaptive") == 0) {
4038 value = Adapter->intr_adaptive;
4039 err = 0;
4040 goto done;
4042 done:
4043 if (err == 0) {
4044 (void) snprintf(pr_val, pr_valsize, "%d", value);
4046 return (err);
4050 * e1000g_get_conf - get configurations set in e1000g.conf
4051 * This routine gets user-configured values out of the configuration
4052 * file e1000g.conf.
4054 * For each configurable value, there is a minimum, a maximum, and a
4055 * default.
4056 * If the user does not configure a value, use the default.
4057 * If the user configures below the minimum, use the minimum.
4058 * If the user configures above the maximum, use the maximum.
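 *
 * Each e1000g_get_prop() call below clamps the configured value into
 * [min, max] (falling back to the default) and hands it back via 'propval';
 * judging by how the *_flag members are used elsewhere in this file, its
 * return value records whether the property was explicitly configured.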
4060 static void
4061 e1000g_get_conf(struct e1000g *Adapter)
4063 struct e1000_hw *hw = &Adapter->shared;
4064 boolean_t tbi_compatibility = B_FALSE;
4065 boolean_t is_jumbo = B_FALSE;
4066 int propval;
4068 * decrease the number of descriptors and free packets
4069 * for jumbo frames to reduce tx/rx resource consumption
4071 if (Adapter->max_frame_size >= FRAME_SIZE_UPTO_4K) {
4072 is_jumbo = B_TRUE;
4076 * get each configurable property from e1000g.conf
4080 * NumTxDescriptors
4082 Adapter->tx_desc_num_flag =
4083 e1000g_get_prop(Adapter, "NumTxDescriptors",
4084 MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR,
4085 is_jumbo ? DEFAULT_JUMBO_NUM_TX_DESC
4086 : DEFAULT_NUM_TX_DESCRIPTOR, &propval);
4087 Adapter->tx_desc_num = propval;
4090 * NumRxDescriptors
4092 Adapter->rx_desc_num_flag =
4093 e1000g_get_prop(Adapter, "NumRxDescriptors",
4094 MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR,
4095 is_jumbo ? DEFAULT_JUMBO_NUM_RX_DESC
4096 : DEFAULT_NUM_RX_DESCRIPTOR, &propval);
4097 Adapter->rx_desc_num = propval;
4100 * NumRxFreeList
4102 Adapter->rx_buf_num_flag =
4103 e1000g_get_prop(Adapter, "NumRxFreeList",
4104 MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST,
4105 is_jumbo ? DEFAULT_JUMBO_NUM_RX_BUF
4106 : DEFAULT_NUM_RX_FREELIST, &propval);
4107 Adapter->rx_freelist_limit = propval;
4110 * NumTxPacketList
4112 Adapter->tx_buf_num_flag =
4113 e1000g_get_prop(Adapter, "NumTxPacketList",
4114 MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST,
4115 is_jumbo ? DEFAULT_JUMBO_NUM_TX_BUF
4116 : DEFAULT_NUM_TX_FREELIST, &propval);
4117 Adapter->tx_freelist_num = propval;
4120 * FlowControl
4122 hw->fc.send_xon = B_TRUE;
4123 (void) e1000g_get_prop(Adapter, "FlowControl",
4124 e1000_fc_none, 4, DEFAULT_FLOW_CONTROL, &propval);
4125 hw->fc.requested_mode = propval;
4126 /* 4 is the setting that says "let the eeprom decide" */
4127 if (hw->fc.requested_mode == 4)
4128 hw->fc.requested_mode = e1000_fc_default;
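	/*
	 * For reference, the FlowControl values accepted here follow the
	 * shared code's e1000_fc_mode enumeration (assuming its usual
	 * ordering): 0 = none, 1 = rx pause only, 2 = tx pause only,
	 * 3 = full, and 4 = use the EEPROM/hardware default.
	 */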
4131 * Max Num Receive Packets on Interrupt
4133 (void) e1000g_get_prop(Adapter, "MaxNumReceivePackets",
4134 MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR,
4135 DEFAULT_RX_LIMIT_ON_INTR, &propval);
4136 Adapter->rx_limit_onintr = propval;
4139 * PHY master slave setting
4141 (void) e1000g_get_prop(Adapter, "SetMasterSlave",
4142 e1000_ms_hw_default, e1000_ms_auto,
4143 e1000_ms_hw_default, &propval);
4144 hw->phy.ms_type = propval;
4147 * Parameter which controls TBI mode workaround, which is only
4148 * needed on certain switches such as Cisco 6500/Foundry
4150 (void) e1000g_get_prop(Adapter, "TbiCompatibilityEnable",
4151 0, 1, DEFAULT_TBI_COMPAT_ENABLE, &propval);
4152 tbi_compatibility = (propval == 1);
4153 e1000_set_tbi_compatibility_82543(hw, tbi_compatibility);
4156 * MSI Enable
4158 (void) e1000g_get_prop(Adapter, "MSIEnable",
4159 0, 1, DEFAULT_MSI_ENABLE, &propval);
4160 Adapter->msi_enable = (propval == 1);
4163 * Interrupt Throttling Rate
4165 (void) e1000g_get_prop(Adapter, "intr_throttling_rate",
4166 MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
4167 DEFAULT_INTR_THROTTLING, &propval);
4168 Adapter->intr_throttling_rate = propval;
4171 * Adaptive Interrupt Blanking Enable/Disable
4172 * It is enabled by default
4174 (void) e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1,
4175 &propval);
4176 Adapter->intr_adaptive = (propval == 1);
4179 * Hardware checksum enable/disable parameter
4181 (void) e1000g_get_prop(Adapter, "tx_hcksum_enable",
4182 0, 1, DEFAULT_TX_HCKSUM_ENABLE, &propval);
4183 Adapter->tx_hcksum_enable = (propval == 1);
4185 * Checksum on/off selection via global parameters.
4187 * If the chip is flagged as not capable of (correctly)
4188 * handling checksumming, we don't enable it on either
4189 * Rx or Tx side. Otherwise, we take this chip's settings
4190 * from the patchable global defaults.
4192 * We advertise our capabilities only if TX offload is
4193 * enabled. On receive, the stack will accept checksummed
4194 * packets anyway, even if we haven't said we can deliver
4195 * them.
4197 switch (hw->mac.type) {
4198 case e1000_82540:
4199 case e1000_82544:
4200 case e1000_82545:
4201 case e1000_82545_rev_3:
4202 case e1000_82546:
4203 case e1000_82546_rev_3:
4204 case e1000_82571:
4205 case e1000_82572:
4206 case e1000_82573:
4207 case e1000_80003es2lan:
4208 break;
4210 * For the following Intel PRO/1000 chipsets, we have not
4211 * tested the hardware checksum offload capability, so we
4212 * disable the capability for them.
4213 * e1000_82542,
4214 * e1000_82543,
4215 * e1000_82541,
4216 * e1000_82541_rev_2,
4217 * e1000_82547,
4218 * e1000_82547_rev_2,
4220 default:
4221 Adapter->tx_hcksum_enable = B_FALSE;
4225 * Large Send Offloading(LSO) Enable/Disable
4226 * If the tx hardware checksum is not enabled, LSO should be
4227 * disabled.
4229 (void) e1000g_get_prop(Adapter, "lso_enable",
4230 0, 1, DEFAULT_LSO_ENABLE, &propval);
4231 Adapter->lso_enable = (propval == 1);
4233 switch (hw->mac.type) {
4234 case e1000_82546:
4235 case e1000_82546_rev_3:
4236 if (Adapter->lso_enable)
4237 Adapter->lso_premature_issue = B_TRUE;
4238 /* FALLTHRU */
4239 case e1000_82571:
4240 case e1000_82572:
4241 case e1000_82573:
4242 case e1000_80003es2lan:
4243 break;
4244 default:
4245 Adapter->lso_enable = B_FALSE;
4248 if (!Adapter->tx_hcksum_enable) {
4249 Adapter->lso_premature_issue = B_FALSE;
4250 Adapter->lso_enable = B_FALSE;
4254  * If mem_workaround_82546 is enabled, the rx buffers allocated for
4255  * e1000_82545, e1000_82546 and e1000_82546_rev_3
4256  * will not cross a 64k boundary.
4258 (void) e1000g_get_prop(Adapter, "mem_workaround_82546",
4259 0, 1, DEFAULT_MEM_WORKAROUND_82546, &propval);
4260 Adapter->mem_workaround_82546 = (propval == 1);
4263 * Max number of multicast addresses
4265 (void) e1000g_get_prop(Adapter, "mcast_max_num",
4266 MIN_MCAST_NUM, MAX_MCAST_NUM, hw->mac.mta_reg_count * 32,
4267 &propval);
4268 Adapter->mcast_max_num = propval;
4272 * e1000g_get_prop - routine to read properties
4274  * Get a user-configured property value out of the configuration
4275 * file e1000g.conf.
4277 * Caller provides name of the property, a default value, a minimum
4278 * value, a maximum value and a pointer to the returned property
4279 * value.
4281 * Return B_TRUE if the configured value of the property is not a default
4282 * value, otherwise return B_FALSE.
4284 static boolean_t
4285 e1000g_get_prop(struct e1000g *Adapter,	/* pointer to per-adapter structure */
4286     char *propname,		/* name of the property */
4287     int minval,		/* minimum acceptable value */
4288     int maxval,		/* maximum acceptable value */
4289     int defval,		/* default value */
4290     int *propvalue)	/* property value returned to caller */
4292 	int propval;		/* value returned for requested property */
4293 	int *props;		/* pointer to array of properties returned */
4294 	uint_t nprops;		/* number of property values returned */
4295 boolean_t ret = B_TRUE;
4298 * get the array of properties from the config file
4300 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip,
4301 DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) {
4302 /* got some properties, test if we got enough */
4303 if (Adapter->instance < nprops) {
4304 propval = props[Adapter->instance];
4305 } else {
4306 /* not enough properties configured */
4307 propval = defval;
4308 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4309 "Not Enough %s values found in e1000g.conf"
4310 " - set to %d\n",
4311 propname, propval);
4312 ret = B_FALSE;
4315 /* free memory allocated for properties */
4316 ddi_prop_free(props);
4318 } else {
4319 propval = defval;
4320 ret = B_FALSE;
4324 * enforce limits
4326 if (propval > maxval) {
4327 propval = maxval;
4328 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4329 "Too High %s value in e1000g.conf - set to %d\n",
4330 propname, propval);
4333 if (propval < minval) {
4334 propval = minval;
4335 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4336 "Too Low %s value in e1000g.conf - set to %d\n",
4337 propname, propval);
4340 *propvalue = propval;
4341 return (ret);
4344 static boolean_t
4345 e1000g_link_check(struct e1000g *Adapter)
4347 uint16_t speed, duplex, phydata;
4348 boolean_t link_changed = B_FALSE;
4349 struct e1000_hw *hw;
4350 uint32_t reg_tarc;
4352 hw = &Adapter->shared;
4354 if (e1000g_link_up(Adapter)) {
4356 * The Link is up, check whether it was marked as down earlier
4358 if (Adapter->link_state != LINK_STATE_UP) {
4359 (void) e1000_get_speed_and_duplex(hw, &speed, &duplex);
4360 Adapter->link_speed = speed;
4361 Adapter->link_duplex = duplex;
4362 Adapter->link_state = LINK_STATE_UP;
4363 link_changed = B_TRUE;
4365 if (Adapter->link_speed == SPEED_1000)
4366 Adapter->stall_threshold = TX_STALL_TIME_2S;
4367 else
4368 Adapter->stall_threshold = TX_STALL_TIME_8S;
4370 Adapter->tx_link_down_timeout = 0;
4372 if ((hw->mac.type == e1000_82571) ||
4373 (hw->mac.type == e1000_82572)) {
4374 reg_tarc = E1000_READ_REG(hw, E1000_TARC(0));
4375 if (speed == SPEED_1000)
4376 reg_tarc |= (1 << 21);
4377 else
4378 reg_tarc &= ~(1 << 21);
4379 E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc);
4382 Adapter->smartspeed = 0;
4383 } else {
4384 if (Adapter->link_state != LINK_STATE_DOWN) {
4385 Adapter->link_speed = 0;
4386 Adapter->link_duplex = 0;
4387 Adapter->link_state = LINK_STATE_DOWN;
4388 link_changed = B_TRUE;
4391 		 * SmartSpeed workaround for Tabor/TanaX. When the
4392 		 * driver loses link, disable auto master/slave
4393 		 * resolution.
4395 if (hw->phy.type == e1000_phy_igp) {
4396 (void) e1000_read_phy_reg(hw,
4397 PHY_1000T_CTRL, &phydata);
4398 phydata |= CR_1000T_MS_ENABLE;
4399 (void) e1000_write_phy_reg(hw,
4400 PHY_1000T_CTRL, phydata);
4402 } else {
4403 e1000g_smartspeed(Adapter);
4406 if (Adapter->e1000g_state & E1000G_STARTED) {
4407 if (Adapter->tx_link_down_timeout <
4408 MAX_TX_LINK_DOWN_TIMEOUT) {
4409 Adapter->tx_link_down_timeout++;
4410 } else if (Adapter->tx_link_down_timeout ==
4411 MAX_TX_LINK_DOWN_TIMEOUT) {
4412 e1000g_tx_clean(Adapter);
4413 Adapter->tx_link_down_timeout++;
4418 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4419 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4421 return (link_changed);
4425 * e1000g_reset_link - Using the link properties to setup the link
4428 e1000g_reset_link(struct e1000g *Adapter)
4430 struct e1000_mac_info *mac;
4431 struct e1000_phy_info *phy;
4432 struct e1000_hw *hw;
4433 boolean_t invalid;
4435 mac = &Adapter->shared.mac;
4436 phy = &Adapter->shared.phy;
4437 hw = &Adapter->shared;
4438 invalid = B_FALSE;
4440 if (hw->phy.media_type != e1000_media_type_copper)
4441 goto out;
4443 if (Adapter->param_adv_autoneg == 1) {
4444 mac->autoneg = B_TRUE;
4445 phy->autoneg_advertised = 0;
4448 * 1000hdx is not supported for autonegotiation
4450 if (Adapter->param_adv_1000fdx == 1)
4451 phy->autoneg_advertised |= ADVERTISE_1000_FULL;
4453 if (Adapter->param_adv_100fdx == 1)
4454 phy->autoneg_advertised |= ADVERTISE_100_FULL;
4456 if (Adapter->param_adv_100hdx == 1)
4457 phy->autoneg_advertised |= ADVERTISE_100_HALF;
4459 if (Adapter->param_adv_10fdx == 1)
4460 phy->autoneg_advertised |= ADVERTISE_10_FULL;
4462 if (Adapter->param_adv_10hdx == 1)
4463 phy->autoneg_advertised |= ADVERTISE_10_HALF;
4465 if (phy->autoneg_advertised == 0)
4466 invalid = B_TRUE;
4467 } else {
4468 mac->autoneg = B_FALSE;
4471 * For Intel copper cards, 1000fdx and 1000hdx are not
4472 * supported for forced link
4474 if (Adapter->param_adv_100fdx == 1)
4475 mac->forced_speed_duplex = ADVERTISE_100_FULL;
4476 else if (Adapter->param_adv_100hdx == 1)
4477 mac->forced_speed_duplex = ADVERTISE_100_HALF;
4478 else if (Adapter->param_adv_10fdx == 1)
4479 mac->forced_speed_duplex = ADVERTISE_10_FULL;
4480 else if (Adapter->param_adv_10hdx == 1)
4481 mac->forced_speed_duplex = ADVERTISE_10_HALF;
4482 else
4483 invalid = B_TRUE;
4487 if (invalid) {
4488 e1000g_log(Adapter, CE_WARN,
4489 "Invalid link settings. Setup link to "
4490 "support autonegotiation with all link capabilities.");
4491 mac->autoneg = B_TRUE;
4492 phy->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
4495 out:
4496 return (e1000_setup_link(&Adapter->shared));
4499 static void
4500 e1000g_timer_tx_resched(struct e1000g *Adapter)
4502 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
4504 rw_enter(&Adapter->chip_lock, RW_READER);
4506 if (tx_ring->resched_needed &&
4507 ((ddi_get_lbolt() - tx_ring->resched_timestamp) >
4508 drv_usectohz(1000000)) &&
4509 (Adapter->e1000g_state & E1000G_STARTED) &&
4510 (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) {
4511 tx_ring->resched_needed = B_FALSE;
4512 mac_tx_update(Adapter->mh);
4513 E1000G_STAT(tx_ring->stat_reschedule);
4514 E1000G_STAT(tx_ring->stat_timer_reschedule);
4517 rw_exit(&Adapter->chip_lock);
4520 static void
4521 e1000g_local_timer(void *ws)
4523 struct e1000g *Adapter = (struct e1000g *)ws;
4524 struct e1000_hw *hw;
4525 e1000g_ether_addr_t ether_addr;
4526 boolean_t link_changed;
4528 hw = &Adapter->shared;
4530 if (Adapter->e1000g_state & E1000G_ERROR) {
4531 rw_enter(&Adapter->chip_lock, RW_WRITER);
4532 Adapter->e1000g_state &= ~E1000G_ERROR;
4533 rw_exit(&Adapter->chip_lock);
4535 Adapter->reset_count++;
4536 if (e1000g_global_reset(Adapter)) {
4537 ddi_fm_service_impact(Adapter->dip,
4538 DDI_SERVICE_RESTORED);
4539 e1000g_timer_tx_resched(Adapter);
4540 } else
4541 ddi_fm_service_impact(Adapter->dip,
4542 DDI_SERVICE_LOST);
4543 return;
4546 if (e1000g_stall_check(Adapter)) {
4547 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
4548 "Tx stall detected. Activate automatic recovery.\n");
4549 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL);
4550 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
4551 Adapter->reset_count++;
4552 if (e1000g_reset_adapter(Adapter)) {
4553 ddi_fm_service_impact(Adapter->dip,
4554 DDI_SERVICE_RESTORED);
4555 e1000g_timer_tx_resched(Adapter);
4557 return;
4560 link_changed = B_FALSE;
4561 rw_enter(&Adapter->chip_lock, RW_READER);
4562 if (Adapter->link_complete)
4563 link_changed = e1000g_link_check(Adapter);
4564 rw_exit(&Adapter->chip_lock);
4566 if (link_changed) {
4567 if (!Adapter->reset_flag &&
4568 (Adapter->e1000g_state & E1000G_STARTED) &&
4569 !(Adapter->e1000g_state & E1000G_SUSPENDED))
4570 mac_link_update(Adapter->mh, Adapter->link_state);
4571 if (Adapter->link_state == LINK_STATE_UP)
4572 Adapter->reset_flag = B_FALSE;
4575 * Workaround for esb2. Data stuck in fifo on a link
4576 * down event. Reset the adapter to recover it.
4578 if (Adapter->esb2_workaround) {
4579 Adapter->esb2_workaround = B_FALSE;
4580 (void) e1000g_reset_adapter(Adapter);
4581 return;
4585 * With 82571 controllers, any locally administered address will
4586 * be overwritten when there is a reset on the other port.
4587 * Detect this circumstance and correct it.
4589 if ((hw->mac.type == e1000_82571) &&
4590 (e1000_get_laa_state_82571(hw) == B_TRUE)) {
4591 ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0);
4592 ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1);
4594 ether_addr.reg.low = ntohl(ether_addr.reg.low);
4595 ether_addr.reg.high = ntohl(ether_addr.reg.high);
4597 if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) ||
4598 (ether_addr.mac.addr[4] != hw->mac.addr[1]) ||
4599 (ether_addr.mac.addr[3] != hw->mac.addr[2]) ||
4600 (ether_addr.mac.addr[2] != hw->mac.addr[3]) ||
4601 (ether_addr.mac.addr[1] != hw->mac.addr[4]) ||
4602 (ether_addr.mac.addr[0] != hw->mac.addr[5])) {
4603 (void) e1000_rar_set(hw, hw->mac.addr, 0);
4608 * Long TTL workaround for 82541/82547
4610 (void) e1000_igp_ttl_workaround_82547(hw);
4613 	 * Check for Adaptive IFS settings. If there are lots of collisions,
4614 	 * change the value in steps...
4615 	 * These properties should only be set for 10/100.
4617 if ((hw->phy.media_type == e1000_media_type_copper) &&
4618 ((Adapter->link_speed == SPEED_100) ||
4619 (Adapter->link_speed == SPEED_10))) {
4620 e1000_update_adaptive(hw);
4623 * Set Timer Interrupts
4625 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
4627 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4628 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4629 else
4630 e1000g_timer_tx_resched(Adapter);
4632 restart_watchdog_timer(Adapter);
4636 * The function e1000g_link_timer() is called when the timer for link setup
4637  * expires, which indicates the completion of the link setup. The link
4638  * state will not be updated until the link setup is completed, and the
4639  * link state is not sent to the upper layer through mac_link_update()
4640  * in this function. It will be updated in the local timer routine or the
4641 * interrupt service routine after the interface is started (plumbed).
4643 static void
4644 e1000g_link_timer(void *arg)
4646 struct e1000g *Adapter = (struct e1000g *)arg;
4648 mutex_enter(&Adapter->link_lock);
4649 Adapter->link_complete = B_TRUE;
4650 Adapter->link_tid = 0;
4651 mutex_exit(&Adapter->link_lock);
4655 * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf
4657  * This function reads the forced speed and duplex for 10/100 Mbps speeds
4658 * and also for 1000 Mbps speeds from the e1000g.conf file
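/*
 * A hypothetical e1000g.conf entry forcing 100 Mbps full duplex on the
 * first two instances (assuming GDIAG_100_FULL is defined as 4 in
 * e1000g_sw.h) would be:
 *
 *	ForceSpeedDuplex=4,4;
 *
 * The default, GDIAG_ANY, leaves the speed/duplex to autonegotiation and
 * the AutoNegAdvertised handling in the default case below.
 */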
4660 static void
4661 e1000g_force_speed_duplex(struct e1000g *Adapter)
4663 int forced;
4664 int propval;
4665 struct e1000_mac_info *mac = &Adapter->shared.mac;
4666 struct e1000_phy_info *phy = &Adapter->shared.phy;
4669 * get value out of config file
4671 (void) e1000g_get_prop(Adapter, "ForceSpeedDuplex",
4672 GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY, &forced);
4674 switch (forced) {
4675 case GDIAG_10_HALF:
4677 * Disable Auto Negotiation
4679 mac->autoneg = B_FALSE;
4680 mac->forced_speed_duplex = ADVERTISE_10_HALF;
4681 break;
4682 case GDIAG_10_FULL:
4684 * Disable Auto Negotiation
4686 mac->autoneg = B_FALSE;
4687 mac->forced_speed_duplex = ADVERTISE_10_FULL;
4688 break;
4689 case GDIAG_100_HALF:
4691 * Disable Auto Negotiation
4693 mac->autoneg = B_FALSE;
4694 mac->forced_speed_duplex = ADVERTISE_100_HALF;
4695 break;
4696 case GDIAG_100_FULL:
4698 * Disable Auto Negotiation
4700 mac->autoneg = B_FALSE;
4701 mac->forced_speed_duplex = ADVERTISE_100_FULL;
4702 break;
4703 case GDIAG_1000_FULL:
4705 * The gigabit spec requires autonegotiation. Therefore,
4706 * when the user wants to force the speed to 1000Mbps, we
4707 		 * enable AutoNeg, but only allow the hardware to advertise
4708 * 1000Mbps. This is different from 10/100 operation, where
4709 * we are allowed to link without any negotiation.
4711 mac->autoneg = B_TRUE;
4712 phy->autoneg_advertised = ADVERTISE_1000_FULL;
4713 break;
4714 default: /* obey the setting of AutoNegAdvertised */
4715 mac->autoneg = B_TRUE;
4716 (void) e1000g_get_prop(Adapter, "AutoNegAdvertised",
4717 0, AUTONEG_ADVERTISE_SPEED_DEFAULT,
4718 AUTONEG_ADVERTISE_SPEED_DEFAULT, &propval);
4719 phy->autoneg_advertised = (uint16_t)propval;
4720 break;
4721 } /* switch */
4725 * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf
4727 * This function reads MaxFrameSize from e1000g.conf
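/*
 * A hypothetical e1000g.conf entry selecting the largest jumbo frame size
 * for the first instance would be:
 *
 *	MaxFrameSize=3;
 *
 * where 0 selects the standard ethernet MTU and 1/2/3 select MTUs derived
 * from the 4K/8K/16K frame sizes, as implemented by the switch below.
 */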
4729 static void
4730 e1000g_get_max_frame_size(struct e1000g *Adapter)
4732 int max_frame;
4735 * get value out of config file
4737 (void) e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0,
4738 &max_frame);
4740 switch (max_frame) {
4741 case 0:
4742 Adapter->default_mtu = ETHERMTU;
4743 break;
4744 case 1:
4745 Adapter->default_mtu = FRAME_SIZE_UPTO_4K -
4746 sizeof (struct ether_vlan_header) - ETHERFCSL;
4747 break;
4748 case 2:
4749 Adapter->default_mtu = FRAME_SIZE_UPTO_8K -
4750 sizeof (struct ether_vlan_header) - ETHERFCSL;
4751 break;
4752 case 3:
4753 Adapter->default_mtu = FRAME_SIZE_UPTO_16K -
4754 sizeof (struct ether_vlan_header) - ETHERFCSL;
4755 break;
4756 default:
4757 Adapter->default_mtu = ETHERMTU;
4758 break;
4759 } /* switch */
4762 	 * If the user-configured MTU is larger than the device's maximum MTU,
4763 	 * the MTU is set to the device's maximum value.
4765 if (Adapter->default_mtu > Adapter->max_mtu)
4766 Adapter->default_mtu = Adapter->max_mtu;
4768 Adapter->max_frame_size = e1000g_mtu2maxframe(Adapter->default_mtu);
4772 * e1000g_pch_limits - Apply limits of the PCH silicon type
4774 * At any frame size larger than the ethernet default,
4775 * prevent linking at 10/100 speeds.
4777 static void
4778 e1000g_pch_limits(struct e1000g *Adapter)
4780 struct e1000_hw *hw = &Adapter->shared;
4782 /* only applies to PCH silicon type */
4783 if (hw->mac.type != e1000_pchlan && hw->mac.type != e1000_pch2lan)
4784 return;
4786 /* only applies to frames larger than ethernet default */
4787 if (Adapter->max_frame_size > DEFAULT_FRAME_SIZE) {
4788 hw->mac.autoneg = B_TRUE;
4789 hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
4791 Adapter->param_adv_autoneg = 1;
4792 Adapter->param_adv_1000fdx = 1;
4794 Adapter->param_adv_100fdx = 0;
4795 Adapter->param_adv_100hdx = 0;
4796 Adapter->param_adv_10fdx = 0;
4797 Adapter->param_adv_10hdx = 0;
4799 e1000g_param_sync(Adapter);
4804 * e1000g_mtu2maxframe - convert given MTU to maximum frame size
4806 static uint32_t
4807 e1000g_mtu2maxframe(uint32_t mtu)
4809 uint32_t maxframe;
4811 maxframe = mtu + sizeof (struct ether_vlan_header) + ETHERFCSL;
4813 return (maxframe);
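/*
 * For example, the default MTU of 1500 bytes yields a maximum frame size
 * of 1500 + 18 (VLAN-tagged ethernet header) + 4 (FCS) = 1522 bytes, and
 * a 9000-byte jumbo MTU yields 9022 bytes.
 */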
4816 static void
4817 arm_watchdog_timer(struct e1000g *Adapter)
4819 Adapter->watchdog_tid =
4820 timeout(e1000g_local_timer,
4821 (void *)Adapter, 1 * drv_usectohz(1000000));
4823 #pragma inline(arm_watchdog_timer)
4825 static void
4826 enable_watchdog_timer(struct e1000g *Adapter)
4828 mutex_enter(&Adapter->watchdog_lock);
4830 if (!Adapter->watchdog_timer_enabled) {
4831 Adapter->watchdog_timer_enabled = B_TRUE;
4832 Adapter->watchdog_timer_started = B_TRUE;
4833 arm_watchdog_timer(Adapter);
4836 mutex_exit(&Adapter->watchdog_lock);
4839 static void
4840 disable_watchdog_timer(struct e1000g *Adapter)
4842 timeout_id_t tid;
4844 mutex_enter(&Adapter->watchdog_lock);
4846 Adapter->watchdog_timer_enabled = B_FALSE;
4847 Adapter->watchdog_timer_started = B_FALSE;
4848 tid = Adapter->watchdog_tid;
4849 Adapter->watchdog_tid = 0;
4851 mutex_exit(&Adapter->watchdog_lock);
4853 if (tid != 0)
4854 (void) untimeout(tid);
4857 static void
4858 start_watchdog_timer(struct e1000g *Adapter)
4860 mutex_enter(&Adapter->watchdog_lock);
4862 if (Adapter->watchdog_timer_enabled) {
4863 if (!Adapter->watchdog_timer_started) {
4864 Adapter->watchdog_timer_started = B_TRUE;
4865 arm_watchdog_timer(Adapter);
4869 mutex_exit(&Adapter->watchdog_lock);
4872 static void
4873 restart_watchdog_timer(struct e1000g *Adapter)
4875 mutex_enter(&Adapter->watchdog_lock);
4877 if (Adapter->watchdog_timer_started)
4878 arm_watchdog_timer(Adapter);
4880 mutex_exit(&Adapter->watchdog_lock);
4883 static void
4884 stop_watchdog_timer(struct e1000g *Adapter)
4886 timeout_id_t tid;
4888 mutex_enter(&Adapter->watchdog_lock);
4890 Adapter->watchdog_timer_started = B_FALSE;
4891 tid = Adapter->watchdog_tid;
4892 Adapter->watchdog_tid = 0;
4894 mutex_exit(&Adapter->watchdog_lock);
4896 if (tid != 0)
4897 (void) untimeout(tid);
4900 static void
4901 stop_link_timer(struct e1000g *Adapter)
4903 timeout_id_t tid;
4905 /* Disable the link timer */
4906 mutex_enter(&Adapter->link_lock);
4908 tid = Adapter->link_tid;
4909 Adapter->link_tid = 0;
4911 mutex_exit(&Adapter->link_lock);
4913 if (tid != 0)
4914 (void) untimeout(tid);
4917 static void
4918 stop_82547_timer(e1000g_tx_ring_t *tx_ring)
4920 timeout_id_t tid;
4922 /* Disable the tx timer for 82547 chipset */
4923 mutex_enter(&tx_ring->tx_lock);
4925 tx_ring->timer_enable_82547 = B_FALSE;
4926 tid = tx_ring->timer_id_82547;
4927 tx_ring->timer_id_82547 = 0;
4929 mutex_exit(&tx_ring->tx_lock);
4931 if (tid != 0)
4932 (void) untimeout(tid);
4935 void
4936 e1000g_clear_interrupt(struct e1000g *Adapter)
4938 E1000_WRITE_REG(&Adapter->shared, E1000_IMC,
4939 0xffffffff & ~E1000_IMS_RXSEQ);
4942 void
4943 e1000g_mask_interrupt(struct e1000g *Adapter)
4945 E1000_WRITE_REG(&Adapter->shared, E1000_IMS,
4946 IMS_ENABLE_MASK & ~E1000_IMS_TXDW);
4948 if (Adapter->tx_intr_enable)
4949 e1000g_mask_tx_interrupt(Adapter);
4953 * This routine is called by e1000g_quiesce(), therefore must not block.
4955 void
4956 e1000g_clear_all_interrupts(struct e1000g *Adapter)
4958 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff);
4961 void
4962 e1000g_mask_tx_interrupt(struct e1000g *Adapter)
4964 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW);
4967 void
4968 e1000g_clear_tx_interrupt(struct e1000g *Adapter)
4970 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW);
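/*
 * The helpers above manipulate the controller's interrupt mask registers:
 * writing a cause bit to IMS (Interrupt Mask Set) unmasks that interrupt,
 * while writing it to IMC (Interrupt Mask Clear) masks it, which is why
 * e1000g_clear_all_interrupts() simply writes all ones to IMC.
 */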
4973 static void
4974 e1000g_smartspeed(struct e1000g *Adapter)
4976 struct e1000_hw *hw = &Adapter->shared;
4977 uint16_t phy_status;
4978 uint16_t phy_ctrl;
4981 * If we're not T-or-T, or we're not autoneg'ing, or we're not
4982 * advertising 1000Full, we don't even use the workaround
4984 if ((hw->phy.type != e1000_phy_igp) ||
4985 !hw->mac.autoneg ||
4986 !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL))
4987 return;
4990 * True if this is the first call of this function or after every
4991 * 30 seconds of not having link
4993 if (Adapter->smartspeed == 0) {
4995 * If Master/Slave config fault is asserted twice, we
4996 * assume back-to-back
4998 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4999 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
5000 return;
5002 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
5003 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
5004 return;
5006 * We're assuming back-2-back because our status register
5007 * insists! there's a fault in the master/slave
5008 * relationship that was "negotiated"
5010 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
5012 * Is the phy configured for manual configuration of
5013 * master/slave?
5015 if (phy_ctrl & CR_1000T_MS_ENABLE) {
5017 * Yes. Then disable manual configuration (enable
5018 * auto configuration) of master/slave
5020 phy_ctrl &= ~CR_1000T_MS_ENABLE;
5021 (void) e1000_write_phy_reg(hw,
5022 PHY_1000T_CTRL, phy_ctrl);
5024 * Effectively starting the clock
5026 Adapter->smartspeed++;
5028 * Restart autonegotiation
5030 if (!e1000_phy_setup_autoneg(hw) &&
5031 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
5032 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
5033 MII_CR_RESTART_AUTO_NEG);
5034 (void) e1000_write_phy_reg(hw,
5035 PHY_CONTROL, phy_ctrl);
5038 return;
5040 * Has 6 seconds transpired still without link? Remember,
5041 * you should reset the smartspeed counter once you obtain
5042 * link
5044 } else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
5046 * Yes. Remember, we did at the start determine that
5047 * there's a master/slave configuration fault, so we're
5048 * still assuming there's someone on the other end, but we
5049 * just haven't yet been able to talk to it. We then
5050 * re-enable auto configuration of master/slave to see if
5051 * we're running 2/3 pair cables.
5054 * If still no link, perhaps using 2/3 pair cable
5056 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
5057 phy_ctrl |= CR_1000T_MS_ENABLE;
5058 (void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
5060 * Restart autoneg with phy enabled for manual
5061 * configuration of master/slave
5063 if (!e1000_phy_setup_autoneg(hw) &&
5064 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
5065 phy_ctrl |=
5066 (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
5067 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
5070 * Hopefully, there are no more faults and we've obtained
5071 * link as a result.
5075 * Restart process after E1000_SMARTSPEED_MAX iterations (30
5076 * seconds)
5078 if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
5079 Adapter->smartspeed = 0;
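/*
 * Since the watchdog timer fires roughly once per second (see
 * arm_watchdog_timer()), the E1000_SMARTSPEED_DOWNSHIFT and
 * E1000_SMARTSPEED_MAX iteration counts correspond approximately to the
 * 6-second and 30-second intervals mentioned in the comments above.
 */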
5082 static boolean_t
5083 is_valid_mac_addr(uint8_t *mac_addr)
5085 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
5086 const uint8_t addr_test2[6] =
5087 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5089 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
5090 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
5091 return (B_FALSE);
5093 return (B_TRUE);
5097 * e1000g_stall_check - check for tx stall
5099 * This function checks if the adapter is stalled (in transmit).
5101 * It is called each time the watchdog timeout is invoked.
5102  * If the transmit descriptor reclaim keeps failing for longer
5103  * than the stall threshold, the reclaim path sets the stall
5104  * flag, and the adapter is assumed to have stalled and needs
5105  * to be reset.
5107 static boolean_t
5108 e1000g_stall_check(struct e1000g *Adapter)
5110 e1000g_tx_ring_t *tx_ring;
5112 tx_ring = Adapter->tx_ring;
5114 if (Adapter->link_state != LINK_STATE_UP)
5115 return (B_FALSE);
5117 (void) e1000g_recycle(tx_ring);
5119 if (Adapter->stall_flag)
5120 return (B_TRUE);
5122 return (B_FALSE);
5125 #ifdef E1000G_DEBUG
5126 static enum ioc_reply
5127 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp)
5129 void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd);
5130 e1000g_peekpoke_t *ppd;
5131 uint64_t mem_va;
5132 uint64_t maxoff;
5133 boolean_t peek;
5135 switch (iocp->ioc_cmd) {
5137 case E1000G_IOC_REG_PEEK:
5138 peek = B_TRUE;
5139 break;
5141 case E1000G_IOC_REG_POKE:
5142 peek = B_FALSE;
5143 break;
5145 	default:
5146 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
5147 "e1000g_diag_ioctl: invalid ioctl command 0x%X\n",
5148 iocp->ioc_cmd);
5149 return (IOC_INVAL);
5153 * Validate format of ioctl
5155 if (iocp->ioc_count != sizeof (e1000g_peekpoke_t))
5156 return (IOC_INVAL);
5157 if (mp->b_cont == NULL)
5158 return (IOC_INVAL);
5160 ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr;
5163 * Validate request parameters
5165 switch (ppd->pp_acc_space) {
5167 default:
5168 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
5169 "e1000g_diag_ioctl: invalid access space 0x%X\n",
5170 ppd->pp_acc_space);
5171 return (IOC_INVAL);
5173 case E1000G_PP_SPACE_REG:
5175 * Memory-mapped I/O space
5177 ASSERT(ppd->pp_acc_size == 4);
5178 if (ppd->pp_acc_size != 4)
5179 return (IOC_INVAL);
5181 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
5182 return (IOC_INVAL);
5184 mem_va = 0;
5185 maxoff = 0x10000;
5186 ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg;
5187 break;
5189 case E1000G_PP_SPACE_E1000G:
5191 * E1000g data structure!
5193 mem_va = (uintptr_t)e1000gp;
5194 maxoff = sizeof (struct e1000g);
5195 ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem;
5196 break;
5200 if (ppd->pp_acc_offset >= maxoff)
5201 return (IOC_INVAL);
5203 if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff)
5204 return (IOC_INVAL);
5207 * All OK - go!
5209 ppd->pp_acc_offset += mem_va;
5210 (*ppfn)(e1000gp, ppd);
5211 return (peek ? IOC_REPLY : IOC_ACK);
5214 static void
5215 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5217 ddi_acc_handle_t handle;
5218 uint32_t *regaddr;
5220 handle = e1000gp->osdep.reg_handle;
5221 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
5222 (uintptr_t)ppd->pp_acc_offset);
5224 ppd->pp_acc_data = ddi_get32(handle, regaddr);
5227 static void
5228 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5230 ddi_acc_handle_t handle;
5231 uint32_t *regaddr;
5232 uint32_t value;
5234 handle = e1000gp->osdep.reg_handle;
5235 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
5236 (uintptr_t)ppd->pp_acc_offset);
5237 value = (uint32_t)ppd->pp_acc_data;
5239 ddi_put32(handle, regaddr, value);
5242 static void
5243 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5245 uint64_t value;
5246 void *vaddr;
5248 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5250 switch (ppd->pp_acc_size) {
5251 case 1:
5252 value = *(uint8_t *)vaddr;
5253 break;
5255 case 2:
5256 value = *(uint16_t *)vaddr;
5257 break;
5259 case 4:
5260 value = *(uint32_t *)vaddr;
5261 break;
5263 case 8:
5264 value = *(uint64_t *)vaddr;
5265 break;
5268 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
5269 "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n",
5270 (void *)e1000gp, (void *)ppd, value, vaddr);
5272 ppd->pp_acc_data = value;
5275 static void
5276 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5278 uint64_t value;
5279 void *vaddr;
5281 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5282 value = ppd->pp_acc_data;
5284 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
5285 "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n",
5286 (void *)e1000gp, (void *)ppd, value, vaddr);
5288 switch (ppd->pp_acc_size) {
5289 case 1:
5290 *(uint8_t *)vaddr = (uint8_t)value;
5291 break;
5293 case 2:
5294 *(uint16_t *)vaddr = (uint16_t)value;
5295 break;
5297 case 4:
5298 *(uint32_t *)vaddr = (uint32_t)value;
5299 break;
5301 case 8:
5302 *(uint64_t *)vaddr = (uint64_t)value;
5303 break;
5306 #endif
5309 * Loopback Support
5311 static lb_property_t lb_normal =
5312 { normal, "normal", E1000G_LB_NONE };
5313 static lb_property_t lb_external1000 =
5314 { external, "1000Mbps", E1000G_LB_EXTERNAL_1000 };
5315 static lb_property_t lb_external100 =
5316 { external, "100Mbps", E1000G_LB_EXTERNAL_100 };
5317 static lb_property_t lb_external10 =
5318 { external, "10Mbps", E1000G_LB_EXTERNAL_10 };
5319 static lb_property_t lb_phy =
5320 { internal, "PHY", E1000G_LB_INTERNAL_PHY };
5322 static enum ioc_reply
5323 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp)
5325 lb_info_sz_t *lbsp;
5326 lb_property_t *lbpp;
5327 struct e1000_hw *hw;
5328 uint32_t *lbmp;
5329 uint32_t size;
5330 uint32_t value;
5332 hw = &Adapter->shared;
5334 if (mp->b_cont == NULL)
5335 return (IOC_INVAL);
5337 if (!e1000g_check_loopback_support(hw)) {
5338 e1000g_log(NULL, CE_WARN,
5339 "Loopback is not supported on e1000g%d", Adapter->instance);
5340 return (IOC_INVAL);
5343 switch (iocp->ioc_cmd) {
5344 default:
5345 return (IOC_INVAL);
5347 case LB_GET_INFO_SIZE:
5348 size = sizeof (lb_info_sz_t);
5349 if (iocp->ioc_count != size)
5350 return (IOC_INVAL);
5352 rw_enter(&Adapter->chip_lock, RW_WRITER);
5353 e1000g_get_phy_state(Adapter);
5356 * Workaround for hardware faults. In order to get a stable
5357 		 * state of the PHY, we wait for a specific interval and
5358 		 * try again. The time delay is an empirical value based
5359 		 * on our testing.
5361 msec_delay(100);
5362 e1000g_get_phy_state(Adapter);
5363 rw_exit(&Adapter->chip_lock);
5365 value = sizeof (lb_normal);
5366 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5367 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5368 (hw->phy.media_type == e1000_media_type_fiber) ||
5369 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5370 value += sizeof (lb_phy);
5371 switch (hw->mac.type) {
5372 case e1000_82571:
5373 case e1000_82572:
5374 case e1000_80003es2lan:
5375 value += sizeof (lb_external1000);
5376 break;
5379 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5380 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5381 value += sizeof (lb_external100);
5382 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5383 value += sizeof (lb_external10);
5385 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
5386 *lbsp = value;
5387 break;
5389 case LB_GET_INFO:
5390 value = sizeof (lb_normal);
5391 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5392 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5393 (hw->phy.media_type == e1000_media_type_fiber) ||
5394 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5395 value += sizeof (lb_phy);
5396 switch (hw->mac.type) {
5397 case e1000_82571:
5398 case e1000_82572:
5399 case e1000_80003es2lan:
5400 value += sizeof (lb_external1000);
5401 break;
5404 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5405 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5406 value += sizeof (lb_external100);
5407 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5408 value += sizeof (lb_external10);
5410 size = value;
5411 if (iocp->ioc_count != size)
5412 return (IOC_INVAL);
5414 value = 0;
5415 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
5416 lbpp[value++] = lb_normal;
5417 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5418 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5419 (hw->phy.media_type == e1000_media_type_fiber) ||
5420 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5421 lbpp[value++] = lb_phy;
5422 switch (hw->mac.type) {
5423 case e1000_82571:
5424 case e1000_82572:
5425 case e1000_80003es2lan:
5426 lbpp[value++] = lb_external1000;
5427 break;
5430 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5431 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5432 lbpp[value++] = lb_external100;
5433 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5434 lbpp[value++] = lb_external10;
5435 break;
5437 case LB_GET_MODE:
5438 size = sizeof (uint32_t);
5439 if (iocp->ioc_count != size)
5440 return (IOC_INVAL);
5442 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
5443 *lbmp = Adapter->loopback_mode;
5444 break;
5446 case LB_SET_MODE:
5447 size = 0;
5448 if (iocp->ioc_count != sizeof (uint32_t))
5449 return (IOC_INVAL);
5451 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
5452 if (!e1000g_set_loopback_mode(Adapter, *lbmp))
5453 return (IOC_INVAL);
5454 break;
5457 iocp->ioc_count = size;
5458 iocp->ioc_error = 0;
5460 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
5461 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
5462 return (IOC_INVAL);
5465 return (IOC_REPLY);
5468 static boolean_t
5469 e1000g_check_loopback_support(struct e1000_hw *hw)
5471 switch (hw->mac.type) {
5472 case e1000_82540:
5473 case e1000_82545:
5474 case e1000_82545_rev_3:
5475 case e1000_82546:
5476 case e1000_82546_rev_3:
5477 case e1000_82541:
5478 case e1000_82541_rev_2:
5479 case e1000_82547:
5480 case e1000_82547_rev_2:
5481 case e1000_82571:
5482 case e1000_82572:
5483 case e1000_82573:
5484 case e1000_82574:
5485 case e1000_80003es2lan:
5486 case e1000_ich9lan:
5487 case e1000_ich10lan:
5488 return (B_TRUE);
5490 return (B_FALSE);
5493 static boolean_t
5494 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode)
5496 struct e1000_hw *hw;
5497 int i, times;
5498 boolean_t link_up;
5500 if (mode == Adapter->loopback_mode)
5501 return (B_TRUE);
5503 hw = &Adapter->shared;
5504 times = 0;
5506 Adapter->loopback_mode = mode;
5508 if (mode == E1000G_LB_NONE) {
5509 /* Reset the chip */
5510 hw->phy.autoneg_wait_to_complete = B_TRUE;
5511 (void) e1000g_reset_adapter(Adapter);
5512 hw->phy.autoneg_wait_to_complete = B_FALSE;
5513 return (B_TRUE);
5516 again:
5518 rw_enter(&Adapter->chip_lock, RW_WRITER);
5520 switch (mode) {
5521 default:
5522 rw_exit(&Adapter->chip_lock);
5523 return (B_FALSE);
5525 case E1000G_LB_EXTERNAL_1000:
5526 e1000g_set_external_loopback_1000(Adapter);
5527 break;
5529 case E1000G_LB_EXTERNAL_100:
5530 e1000g_set_external_loopback_100(Adapter);
5531 break;
5533 case E1000G_LB_EXTERNAL_10:
5534 e1000g_set_external_loopback_10(Adapter);
5535 break;
5537 case E1000G_LB_INTERNAL_PHY:
5538 e1000g_set_internal_loopback(Adapter);
5539 break;
5542 times++;
5544 rw_exit(&Adapter->chip_lock);
5546 /* Wait for link up */
5547 for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--)
5548 msec_delay(100);
5550 rw_enter(&Adapter->chip_lock, RW_WRITER);
5552 link_up = e1000g_link_up(Adapter);
5554 rw_exit(&Adapter->chip_lock);
5556 if (!link_up) {
5557 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5558 "Failed to get the link up");
5559 if (times < 2) {
5560 /* Reset the link */
5561 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5562 "Reset the link ...");
5563 (void) e1000g_reset_adapter(Adapter);
5564 goto again;
5568 * Reset driver to loopback none when set loopback failed
5569 * for the second time.
5571 Adapter->loopback_mode = E1000G_LB_NONE;
5573 /* Reset the chip */
5574 hw->phy.autoneg_wait_to_complete = B_TRUE;
5575 (void) e1000g_reset_adapter(Adapter);
5576 hw->phy.autoneg_wait_to_complete = B_FALSE;
5578 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5579 "Set loopback mode failed, reset to loopback none");
5581 return (B_FALSE);
5584 return (B_TRUE);
5588 * The following loopback settings are from Intel's technical
5589 * document - "How To Loopback". All the register settings and
5590 * time delay values are directly inherited from the document
5591  * without further explanation.
5593 static void
5594 e1000g_set_internal_loopback(struct e1000g *Adapter)
5596 struct e1000_hw *hw;
5597 uint32_t ctrl;
5598 uint32_t status;
5599 uint16_t phy_ctrl;
5600 uint16_t phy_reg;
5601 uint32_t txcw;
5603 hw = &Adapter->shared;
5605 /* Disable Smart Power Down */
5606 phy_spd_state(hw, B_FALSE);
5608 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
5609 phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10);
5610 phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000;
5612 switch (hw->mac.type) {
5613 case e1000_82540:
5614 case e1000_82545:
5615 case e1000_82545_rev_3:
5616 case e1000_82546:
5617 case e1000_82546_rev_3:
5618 case e1000_82573:
5619 /* Auto-MDI/MDIX off */
5620 (void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
5621 /* Reset PHY to update Auto-MDI/MDIX */
5622 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5623 phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN);
5624 /* Reset PHY to auto-neg off and force 1000 */
5625 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5626 phy_ctrl | MII_CR_RESET);
5628 * Disable PHY receiver for 82540/545/546 and 82573 Family.
5629 * See comments above e1000g_set_internal_loopback() for the
5630 * background.
5632 (void) e1000_write_phy_reg(hw, 29, 0x001F);
5633 (void) e1000_write_phy_reg(hw, 30, 0x8FFC);
5634 (void) e1000_write_phy_reg(hw, 29, 0x001A);
5635 (void) e1000_write_phy_reg(hw, 30, 0x8FF0);
5636 break;
5637 case e1000_80003es2lan:
5638 /* Force Link Up */
5639 (void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
5640 0x1CC);
5641 /* Sets PCS loopback at 1Gbs */
5642 (void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
5643 0x1046);
5644 break;
5648 * The following registers should be set for e1000_phy_bm phy type.
5649 * e1000_82574, e1000_ich10lan and some e1000_ich9lan use this phy.
5650 * For others, we do not need to set these registers.
5652 if (hw->phy.type == e1000_phy_bm) {
5653 /* Set Default MAC Interface speed to 1GB */
5654 (void) e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg);
5655 phy_reg &= ~0x0007;
5656 phy_reg |= 0x006;
5657 (void) e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg);
5658 /* Assert SW reset for above settings to take effect */
5659 (void) e1000_phy_commit(hw);
5660 msec_delay(1);
5661 /* Force Full Duplex */
5662 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5663 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5664 phy_reg | 0x000C);
5665 /* Set Link Up (in force link) */
5666 (void) e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg);
5667 (void) e1000_write_phy_reg(hw, PHY_REG(776, 16),
5668 phy_reg | 0x0040);
5669 /* Force Link */
5670 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5671 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5672 phy_reg | 0x0040);
5673 /* Set Early Link Enable */
5674 (void) e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg);
5675 (void) e1000_write_phy_reg(hw, PHY_REG(769, 20),
5676 phy_reg | 0x0400);
5679 /* Set loopback */
5680 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK);
5682 msec_delay(250);
5684 /* Now set up the MAC to the same speed/duplex as the PHY. */
5685 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5686 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5687 ctrl |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5688 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5689 E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */
5690 E1000_CTRL_FD); /* Force Duplex to FULL */
5692 switch (hw->mac.type) {
5693 case e1000_82540:
5694 case e1000_82545:
5695 case e1000_82545_rev_3:
5696 case e1000_82546:
5697 case e1000_82546_rev_3:
5699 * For some serdes we'll need to commit the writes now
5700 * so that the status is updated on link
5702 if (hw->phy.media_type == e1000_media_type_internal_serdes) {
5703 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5704 msec_delay(100);
5705 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5708 if (hw->phy.media_type == e1000_media_type_copper) {
5709 /* Invert Loss of Signal */
5710 ctrl |= E1000_CTRL_ILOS;
5711 } else {
5712 /* Set ILOS on fiber nic if half duplex is detected */
5713 status = E1000_READ_REG(hw, E1000_STATUS);
5714 if ((status & E1000_STATUS_FD) == 0)
5715 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5717 break;
5719 case e1000_82571:
5720 case e1000_82572:
5722 * The fiber/SerDes versions of this adapter do not contain an
5723 * accessible PHY. Therefore, loopback beyond MAC must be done
5724 * using SerDes analog loopback.
5726 if (hw->phy.media_type != e1000_media_type_copper) {
5727 /* Disable autoneg by setting bit 31 of TXCW to zero */
5728 txcw = E1000_READ_REG(hw, E1000_TXCW);
5729 txcw &= ~((uint32_t)1 << 31);
5730 E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5733 * Write 0x410 to Serdes Control register
5734 * to enable Serdes analog loopback
5736 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5737 msec_delay(10);
5740 status = E1000_READ_REG(hw, E1000_STATUS);
5741 /* Set ILOS on fiber nic if half duplex is detected */
5742 if ((hw->phy.media_type == e1000_media_type_fiber) &&
5743 ((status & E1000_STATUS_FD) == 0 ||
5744 (status & E1000_STATUS_LU) == 0))
5745 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5746 else if (hw->phy.media_type == e1000_media_type_internal_serdes)
5747 ctrl |= E1000_CTRL_SLU;
5748 break;
5750 case e1000_82573:
5751 ctrl |= E1000_CTRL_ILOS;
5752 break;
5753 case e1000_ich9lan:
5754 case e1000_ich10lan:
5755 ctrl |= E1000_CTRL_SLU;
5756 break;
5758 if (hw->phy.type == e1000_phy_bm)
5759 ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS;
5761 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5764 static void
5765 e1000g_set_external_loopback_1000(struct e1000g *Adapter)
5767 struct e1000_hw *hw;
5768 uint32_t rctl;
5769 uint32_t ctrl_ext;
5770 uint32_t ctrl;
5771 uint32_t status;
5772 uint32_t txcw;
5773 uint16_t phydata;
5775 hw = &Adapter->shared;
5777 /* Disable Smart Power Down */
5778 phy_spd_state(hw, B_FALSE);
5780 switch (hw->mac.type) {
5781 case e1000_82571:
5782 case e1000_82572:
5783 switch (hw->phy.media_type) {
5784 case e1000_media_type_copper:
5785 /* Force link up (Must be done before the PHY writes) */
5786 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5787 ctrl |= E1000_CTRL_SLU; /* Force Link Up */
5788 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5790 rctl = E1000_READ_REG(hw, E1000_RCTL);
5791 rctl |= (E1000_RCTL_EN |
5792 E1000_RCTL_SBP |
5793 E1000_RCTL_UPE |
5794 E1000_RCTL_MPE |
5795 E1000_RCTL_LPE |
5796 E1000_RCTL_BAM); /* 0x803E */
5797 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
5799 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5800 ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA |
5801 E1000_CTRL_EXT_SDP6_DATA |
5802 E1000_CTRL_EXT_SDP3_DATA |
5803 E1000_CTRL_EXT_SDP4_DIR |
5804 E1000_CTRL_EXT_SDP6_DIR |
5805 E1000_CTRL_EXT_SDP3_DIR); /* 0x0DD0 */
5806 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5809 * This sequence tunes the PHY's SDP and no customer
5810 * settable values. For background, see comments above
5811 * e1000g_set_internal_loopback().
5813 (void) e1000_write_phy_reg(hw, 0x0, 0x140);
5814 msec_delay(10);
5815 (void) e1000_write_phy_reg(hw, 0x9, 0x1A00);
5816 (void) e1000_write_phy_reg(hw, 0x12, 0xC10);
5817 (void) e1000_write_phy_reg(hw, 0x12, 0x1C10);
5818 (void) e1000_write_phy_reg(hw, 0x1F37, 0x76);
5819 (void) e1000_write_phy_reg(hw, 0x1F33, 0x1);
5820 (void) e1000_write_phy_reg(hw, 0x1F33, 0x0);
5822 (void) e1000_write_phy_reg(hw, 0x1F35, 0x65);
5823 (void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C);
5824 (void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC);
5825 (void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C);
5826 (void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC);
5828 msec_delay(50);
5829 break;
5830 case e1000_media_type_fiber:
5831 case e1000_media_type_internal_serdes:
5832 status = E1000_READ_REG(hw, E1000_STATUS);
5833 if (((status & E1000_STATUS_LU) == 0) ||
5834 (hw->phy.media_type ==
5835 e1000_media_type_internal_serdes)) {
5836 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5837 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5838 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5841 /* Disable autoneg by setting bit 31 of TXCW to zero */
5842 txcw = E1000_READ_REG(hw, E1000_TXCW);
5843 txcw &= ~((uint32_t)1 << 31);
5844 E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5847 * Write 0x410 to Serdes Control register
5848 * to enable Serdes analog loopback
5850 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5851 msec_delay(10);
5852 break;
5853 default:
5854 break;
5856 break;
5857 case e1000_82574:
5858 case e1000_80003es2lan:
5859 case e1000_ich9lan:
5860 case e1000_ich10lan:
5861 (void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata);
5862 (void) e1000_write_phy_reg(hw, GG82563_REG(6, 16),
5863 phydata | (1 << 5));
5864 Adapter->param_adv_autoneg = 1;
5865 Adapter->param_adv_1000fdx = 1;
5866 (void) e1000g_reset_link(Adapter);
5867 break;
5871 static void
5872 e1000g_set_external_loopback_100(struct e1000g *Adapter)
5874 struct e1000_hw *hw;
5875 uint32_t ctrl;
5876 uint16_t phy_ctrl;
5878 hw = &Adapter->shared;
5880 /* Disable Smart Power Down */
5881 phy_spd_state(hw, B_FALSE);
5883 phy_ctrl = (MII_CR_FULL_DUPLEX |
5884 MII_CR_SPEED_100);
5886 /* Force 100/FD, reset PHY */
5887 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5888 phy_ctrl | MII_CR_RESET); /* 0xA100 */
5889 msec_delay(10);
5891 /* Force 100/FD */
5892 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5893 phy_ctrl); /* 0x2100 */
5894 msec_delay(10);
5896 /* Now setup the MAC to the same speed/duplex as the PHY. */
5897 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5898 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5899 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */
5900 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5901 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5902 E1000_CTRL_SPD_100 | /* Force Speed to 100 */
5903 E1000_CTRL_FD); /* Force Duplex to FULL */
5905 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5908 static void
5909 e1000g_set_external_loopback_10(struct e1000g *Adapter)
5911 struct e1000_hw *hw;
5912 uint32_t ctrl;
5913 uint16_t phy_ctrl;
5915 hw = &Adapter->shared;
5917 /* Disable Smart Power Down */
5918 phy_spd_state(hw, B_FALSE);
5920 phy_ctrl = (MII_CR_FULL_DUPLEX |
5921 MII_CR_SPEED_10);
5923 /* Force 10/FD, reset PHY */
5924 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5925 phy_ctrl | MII_CR_RESET); /* 0x8100 */
5926 msec_delay(10);
5928 /* Force 10/FD */
5929 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5930 phy_ctrl); /* 0x0100 */
5931 msec_delay(10);
5933 /* Now setup the MAC to the same speed/duplex as the PHY. */
5934 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5935 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5936 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */
5937 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5938 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5939 E1000_CTRL_SPD_10 | /* Force Speed to 10 */
5940 E1000_CTRL_FD); /* Force Duplex to FULL */
5942 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5946 static int
5947 e1000g_add_intrs(struct e1000g *Adapter)
5949 dev_info_t *devinfo;
5950 int intr_types;
5951 int rc;
5953 devinfo = Adapter->dip;
5955 /* Get supported interrupt types */
5956 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
5958 if (rc != DDI_SUCCESS) {
5959 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5960 "Get supported interrupt types failed: %d\n", rc);
5961 return (DDI_FAILURE);
5965 * Based on Intel Technical Advisory document (TA-160), there are some
5966 * cases where some older Intel PCI-X NICs may "advertise" to the OS
5967 	 * that they support MSI, but in fact have problems.
5968 * So we should only enable MSI for PCI-E NICs and disable MSI for old
5969 * PCI/PCI-X NICs.
5971 if (Adapter->shared.mac.type < e1000_82571)
5972 Adapter->msi_enable = B_FALSE;
5974 if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) {
5975 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI);
5977 if (rc != DDI_SUCCESS) {
5978 /* EMPTY */
5979 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5980 "Add MSI failed, trying Legacy interrupts\n");
5981 } else {
5982 Adapter->intr_type = DDI_INTR_TYPE_MSI;
5986 if ((Adapter->intr_type == 0) &&
5987 (intr_types & DDI_INTR_TYPE_FIXED)) {
5988 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED);
5990 if (rc != DDI_SUCCESS) {
5991 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5992 "Add Legacy interrupts failed\n");
5993 return (DDI_FAILURE);
5996 Adapter->intr_type = DDI_INTR_TYPE_FIXED;
5999 if (Adapter->intr_type == 0) {
6000 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
6001 "No interrupts registered\n");
6002 return (DDI_FAILURE);
6005 return (DDI_SUCCESS);
6009 * e1000g_intr_add() handles MSI/Legacy interrupts
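/*
 * The allocation below follows the usual DDI interrupt setup sequence:
 * ddi_intr_get_nintrs() and ddi_intr_get_navail() to size the request,
 * ddi_intr_alloc() to allocate the handles, ddi_intr_get_pri() and
 * ddi_intr_add_handler() to attach the service routine, and
 * ddi_intr_get_cap() so that e1000g_enable_intrs() can choose between
 * ddi_intr_block_enable() and per-handle ddi_intr_enable().
 */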
6011 static int
6012 e1000g_intr_add(struct e1000g *Adapter, int intr_type)
6014 dev_info_t *devinfo;
6015 int count, avail, actual;
6016 int x, y, rc, inum = 0;
6017 int flag;
6018 ddi_intr_handler_t *intr_handler;
6020 devinfo = Adapter->dip;
6022 /* get number of interrupts */
6023 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
6024 if ((rc != DDI_SUCCESS) || (count == 0)) {
6025 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6026 "Get interrupt number failed. Return: %d, count: %d\n",
6027 rc, count);
6028 return (DDI_FAILURE);
6031 /* get number of available interrupts */
6032 rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
6033 if ((rc != DDI_SUCCESS) || (avail == 0)) {
6034 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6035 "Get interrupt available number failed. "
6036 "Return: %d, available: %d\n", rc, avail);
6037 return (DDI_FAILURE);
6040 if (avail < count) {
6041 /* EMPTY */
6042 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6043 "Interrupts count: %d, available: %d\n",
6044 count, avail);
6047 /* Allocate an array of interrupt handles */
6048 Adapter->intr_size = count * sizeof (ddi_intr_handle_t);
6049 Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP);
6051 /* Set NORMAL behavior for both MSI and FIXED interrupt */
6052 flag = DDI_INTR_ALLOC_NORMAL;
6054 /* call ddi_intr_alloc() */
6055 rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum,
6056 count, &actual, flag);
6058 if ((rc != DDI_SUCCESS) || (actual == 0)) {
6059 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6060 "Allocate interrupts failed: %d\n", rc);
6062 kmem_free(Adapter->htable, Adapter->intr_size);
6063 return (DDI_FAILURE);
6066 if (actual < count) {
6067 /* EMPTY */
6068 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6069 "Interrupts requested: %d, received: %d\n",
6070 count, actual);
6073 Adapter->intr_cnt = actual;
6075 /* Get priority for first msi, assume remaining are all the same */
6076 rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri);
6078 if (rc != DDI_SUCCESS) {
6079 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6080 "Get interrupt priority failed: %d\n", rc);
6082 /* Free already allocated intr */
6083 for (y = 0; y < actual; y++)
6084 (void) ddi_intr_free(Adapter->htable[y]);
6086 kmem_free(Adapter->htable, Adapter->intr_size);
6087 return (DDI_FAILURE);
6091 * In Legacy Interrupt mode, for PCI-Express adapters, we should
6092 * use the interrupt service routine e1000g_intr_pciexpress()
6093 	 * to avoid interrupt stealing when sharing an interrupt with other
6094 * devices.
6096 if (Adapter->shared.mac.type < e1000_82571)
6097 intr_handler = (ddi_intr_handler_t *)e1000g_intr;
6098 else
6099 intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress;
6101 /* Call ddi_intr_add_handler() */
6102 for (x = 0; x < actual; x++) {
6103 rc = ddi_intr_add_handler(Adapter->htable[x],
6104 intr_handler, (caddr_t)Adapter, NULL);
6106 if (rc != DDI_SUCCESS) {
6107 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6108 "Add interrupt handler failed: %d\n", rc);
6110 /* Remove already added handler */
6111 for (y = 0; y < x; y++)
6112 (void) ddi_intr_remove_handler(
6113 Adapter->htable[y]);
6115 /* Free already allocated intr */
6116 for (y = 0; y < actual; y++)
6117 (void) ddi_intr_free(Adapter->htable[y]);
6119 kmem_free(Adapter->htable, Adapter->intr_size);
6120 return (DDI_FAILURE);
6124 rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap);
6126 if (rc != DDI_SUCCESS) {
6127 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6128 "Get interrupt cap failed: %d\n", rc);
6130 /* Free already allocated intr */
6131 for (y = 0; y < actual; y++) {
6132 (void) ddi_intr_remove_handler(Adapter->htable[y]);
6133 (void) ddi_intr_free(Adapter->htable[y]);
6136 kmem_free(Adapter->htable, Adapter->intr_size);
6137 return (DDI_FAILURE);
6140 return (DDI_SUCCESS);
6143 static int
6144 e1000g_rem_intrs(struct e1000g *Adapter)
6146 int x;
6147 int rc;
6149 for (x = 0; x < Adapter->intr_cnt; x++) {
6150 rc = ddi_intr_remove_handler(Adapter->htable[x]);
6151 if (rc != DDI_SUCCESS) {
6152 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6153 "Remove intr handler failed: %d\n", rc);
6154 return (DDI_FAILURE);
6157 rc = ddi_intr_free(Adapter->htable[x]);
6158 if (rc != DDI_SUCCESS) {
6159 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6160 "Free intr failed: %d\n", rc);
6161 return (DDI_FAILURE);
6165 kmem_free(Adapter->htable, Adapter->intr_size);
6167 return (DDI_SUCCESS);
6170 static int
6171 e1000g_enable_intrs(struct e1000g *Adapter)
6173 int x;
6174 int rc;
6176 /* Enable interrupts */
6177 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
6178 /* Call ddi_intr_block_enable() for MSI */
6179 rc = ddi_intr_block_enable(Adapter->htable,
6180 Adapter->intr_cnt);
6181 if (rc != DDI_SUCCESS) {
6182 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6183 "Enable block intr failed: %d\n", rc);
6184 return (DDI_FAILURE);
6186 } else {
6187 /* Call ddi_intr_enable() for Legacy/MSI non block enable */
6188 for (x = 0; x < Adapter->intr_cnt; x++) {
6189 rc = ddi_intr_enable(Adapter->htable[x]);
6190 if (rc != DDI_SUCCESS) {
6191 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6192 "Enable intr failed: %d\n", rc);
6193 return (DDI_FAILURE);
6198 return (DDI_SUCCESS);
static int
e1000g_disable_intrs(struct e1000g *Adapter)
{
    int x;
    int rc;

    /* Disable all interrupts */
    if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
        rc = ddi_intr_block_disable(Adapter->htable,
            Adapter->intr_cnt);
        if (rc != DDI_SUCCESS) {
            E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
                "Disable block intr failed: %d\n", rc);
            return (DDI_FAILURE);
        }
    } else {
        for (x = 0; x < Adapter->intr_cnt; x++) {
            rc = ddi_intr_disable(Adapter->htable[x]);
            if (rc != DDI_SUCCESS) {
                E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
                    "Disable intr failed: %d\n", rc);
                return (DDI_FAILURE);
            }
        }
    }

    return (DDI_SUCCESS);
}

/*
 * e1000g_get_phy_state - get the state of PHY registers, save in the adapter
 */
static void
e1000g_get_phy_state(struct e1000g *Adapter)
{
    struct e1000_hw *hw = &Adapter->shared;

    if (hw->phy.media_type == e1000_media_type_copper) {
        (void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl);
        (void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status);
        (void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
            &Adapter->phy_an_adv);
        (void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP,
            &Adapter->phy_an_exp);
        (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
            &Adapter->phy_ext_status);
        (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL,
            &Adapter->phy_1000t_ctrl);
        (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS,
            &Adapter->phy_1000t_status);
        (void) e1000_read_phy_reg(hw, PHY_LP_ABILITY,
            &Adapter->phy_lp_able);

        Adapter->param_autoneg_cap =
            (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
        Adapter->param_pause_cap =
            (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
        Adapter->param_asym_pause_cap =
            (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
        Adapter->param_1000fdx_cap =
            ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
            (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
        Adapter->param_1000hdx_cap =
            ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
            (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
        Adapter->param_100t4_cap =
            (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
        Adapter->param_100fdx_cap =
            ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
            (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
        Adapter->param_100hdx_cap =
            ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
            (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
        Adapter->param_10fdx_cap =
            (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
        Adapter->param_10hdx_cap =
            (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;

        Adapter->param_adv_autoneg = hw->mac.autoneg;
        Adapter->param_adv_pause =
            (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
        Adapter->param_adv_asym_pause =
            (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
        Adapter->param_adv_1000hdx =
            (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
        Adapter->param_adv_100t4 =
            (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
        if (Adapter->param_adv_autoneg == 1) {
            Adapter->param_adv_1000fdx =
                (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS)
                ? 1 : 0;
            Adapter->param_adv_100fdx =
                (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS)
                ? 1 : 0;
            Adapter->param_adv_100hdx =
                (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS)
                ? 1 : 0;
            Adapter->param_adv_10fdx =
                (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
            Adapter->param_adv_10hdx =
                (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
        }

        Adapter->param_lp_autoneg =
            (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
        Adapter->param_lp_pause =
            (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
        Adapter->param_lp_asym_pause =
            (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
        Adapter->param_lp_1000fdx =
            (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
        Adapter->param_lp_1000hdx =
            (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
        Adapter->param_lp_100t4 =
            (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
        Adapter->param_lp_100fdx =
            (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
        Adapter->param_lp_100hdx =
            (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
        Adapter->param_lp_10fdx =
            (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
        Adapter->param_lp_10hdx =
            (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
    } else {
        /*
         * A 1Gig fiber adapter only offers 1Gig full duplex, so it
         * can only work with a 1Gig full-duplex link partner.
         */
        Adapter->param_autoneg_cap = 0;
        Adapter->param_pause_cap = 1;
        Adapter->param_asym_pause_cap = 1;
        Adapter->param_1000fdx_cap = 1;
        Adapter->param_1000hdx_cap = 0;
        Adapter->param_100t4_cap = 0;
        Adapter->param_100fdx_cap = 0;
        Adapter->param_100hdx_cap = 0;
        Adapter->param_10fdx_cap = 0;
        Adapter->param_10hdx_cap = 0;

        Adapter->param_adv_autoneg = 0;
        Adapter->param_adv_pause = 1;
        Adapter->param_adv_asym_pause = 1;
        Adapter->param_adv_1000fdx = 1;
        Adapter->param_adv_1000hdx = 0;
        Adapter->param_adv_100t4 = 0;
        Adapter->param_adv_100fdx = 0;
        Adapter->param_adv_100hdx = 0;
        Adapter->param_adv_10fdx = 0;
        Adapter->param_adv_10hdx = 0;

        Adapter->param_lp_autoneg = 0;
        Adapter->param_lp_pause = 0;
        Adapter->param_lp_asym_pause = 0;
        Adapter->param_lp_1000fdx = 0;
        Adapter->param_lp_1000hdx = 0;
        Adapter->param_lp_100t4 = 0;
        Adapter->param_lp_100fdx = 0;
        Adapter->param_lp_100hdx = 0;
        Adapter->param_lp_10fdx = 0;
        Adapter->param_lp_10hdx = 0;
    }
}

/*
 * FMA support
 */

int
e1000g_check_acc_handle(ddi_acc_handle_t handle)
{
    ddi_fm_error_t de;

    ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
    ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
    return (de.fme_status);
}

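/*
 * e1000g_check_dma_handle - query a DMA handle for any outstanding fault
 * and return its FMA error status to the caller.
 */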
int
e1000g_check_dma_handle(ddi_dma_handle_t handle)
{
    ddi_fm_error_t de;

    ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
    return (de.fme_status);
}

/*
 * The IO fault service error handling callback function
 */
/* ARGSUSED2 */
static int
e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
    /*
     * as the driver can always deal with an error in any dma or
     * access handle, we can just return the fme_status value.
     */
    pci_ereport_post(dip, err, NULL);
    return (err->fme_status);
}

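/*
 * e1000g_fm_init - register the driver's FMA capabilities (access and DMA
 * checking, ereports, and the error callback) with the IO Fault Services.
 */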
static void
e1000g_fm_init(struct e1000g *Adapter)
{
    ddi_iblock_cookie_t iblk;
    int fma_dma_flag;

    /* Only register with IO Fault Services if we have some capability */
    if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
        e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
    } else {
        e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
    }

    if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
        fma_dma_flag = 1;
    } else {
        fma_dma_flag = 0;
    }

    (void) e1000g_set_fma_flags(fma_dma_flag);

    if (Adapter->fm_capabilities) {

        /* Register capabilities with IO Fault Services */
        ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk);

        /*
         * Initialize pci ereport capabilities if ereport capable
         */
        if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
            DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
            pci_ereport_setup(Adapter->dip);

        /*
         * Register error callback if error callback capable
         */
        if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
            ddi_fm_handler_register(Adapter->dip,
                e1000g_fm_error_cb, (void*) Adapter);
    }
}

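/*
 * e1000g_fm_fini - release the FMA resources registered by e1000g_fm_init().
 */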
static void
e1000g_fm_fini(struct e1000g *Adapter)
{
    /* Only unregister FMA capabilities if we registered some */
    if (Adapter->fm_capabilities) {

        /*
         * Release any resources allocated by pci_ereport_setup()
         */
        if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
            DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
            pci_ereport_teardown(Adapter->dip);

        /*
         * Un-register error callback if error callback capable
         */
        if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
            ddi_fm_handler_unregister(Adapter->dip);

        /* Unregister from IO Fault Services */
        mutex_enter(&e1000g_rx_detach_lock);
        ddi_fm_fini(Adapter->dip);
        if (Adapter->priv_dip != NULL) {
            DEVI(Adapter->priv_dip)->devi_fmhdl = NULL;
        }
        mutex_exit(&e1000g_rx_detach_lock);
    }
}

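/*
 * e1000g_fm_ereport - post a device ereport of the given class if the
 * driver is ereport capable.
 */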
void
e1000g_fm_ereport(struct e1000g *Adapter, char *detail)
{
    uint64_t ena;
    char buf[FM_MAX_CLASS];

    (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
    ena = fm_ena_generate(0, FM_ENA_FMT1);
    if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) {
        ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP,
            FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
    }
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
e1000g_quiesce(dev_info_t *devinfo)
{
    struct e1000g *Adapter;

    Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);

    if (Adapter == NULL)
        return (DDI_FAILURE);

    e1000g_clear_all_interrupts(Adapter);

    (void) e1000_reset_hw(&Adapter->shared);

    /* Setup our HW Tx Head & Tail descriptor pointers */
    E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
    E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);

    /* Setup our HW Rx Head & Tail descriptor pointers */
    E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0);
    E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0);

    return (DDI_SUCCESS);
}

/*
 * synchronize the adv* and en* parameters.
 *
 * See comments in <sys/dld.h> for details of the *_en_*
 * parameters. The usage of ndd for setting adv parameters will
 * synchronize all the en parameters with the e1000g parameters,
 * implicitly disabling any settings made via dladm.
 */
static void
e1000g_param_sync(struct e1000g *Adapter)
{
    Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx;
    Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx;
    Adapter->param_en_100fdx = Adapter->param_adv_100fdx;
    Adapter->param_en_100hdx = Adapter->param_adv_100hdx;
    Adapter->param_en_10fdx = Adapter->param_adv_10fdx;
    Adapter->param_en_10hdx = Adapter->param_adv_10hdx;
}

/*
 * e1000g_get_driver_control - tell manageability firmware that the driver
 * has control.
 */
static void
e1000g_get_driver_control(struct e1000_hw *hw)
{
    uint32_t ctrl_ext;
    uint32_t swsm;

    /* tell manageability firmware the driver has taken over */
    switch (hw->mac.type) {
    case e1000_82573:
        swsm = E1000_READ_REG(hw, E1000_SWSM);
        E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
        break;
    case e1000_82571:
    case e1000_82572:
    case e1000_82574:
    case e1000_80003es2lan:
    case e1000_ich8lan:
    case e1000_ich9lan:
    case e1000_ich10lan:
    case e1000_pchlan:
    case e1000_pch2lan:
        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        E1000_WRITE_REG(hw, E1000_CTRL_EXT,
            ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
        break;
    default:
        /* no manageability firmware: do nothing */
        break;
    }
}

/*
 * e1000g_release_driver_control - tell manageability firmware that the driver
 * has released control.
 */
static void
e1000g_release_driver_control(struct e1000_hw *hw)
{
    uint32_t ctrl_ext;
    uint32_t swsm;

    /* tell manageability firmware the driver has released control */
    switch (hw->mac.type) {
    case e1000_82573:
        swsm = E1000_READ_REG(hw, E1000_SWSM);
        E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
        break;
    case e1000_82571:
    case e1000_82572:
    case e1000_82574:
    case e1000_80003es2lan:
    case e1000_ich8lan:
    case e1000_ich9lan:
    case e1000_ich10lan:
    case e1000_pchlan:
    case e1000_pch2lan:
        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        E1000_WRITE_REG(hw, E1000_CTRL_EXT,
            ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
        break;
    default:
        /* no manageability firmware: do nothing */
        break;
    }
}

/*
 * Restore e1000g promiscuous mode.
 */
static void
e1000g_restore_promisc(struct e1000g *Adapter)
{
    if (Adapter->e1000g_promisc) {
        uint32_t rctl;

        rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
        rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
        E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);