kernel/drivers/net/ixgbe/ixgbe_main.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, Joyent, Inc.
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.
 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
 */
#include "ixgbe_sw.h"

static char ixgbe_ident[] = "Intel 10Gb Ethernet";

/*
 * Local function prototypes
 */
static int ixgbe_register_mac(ixgbe_t *);
static int ixgbe_identify_hardware(ixgbe_t *);
static int ixgbe_regs_map(ixgbe_t *);
static void ixgbe_init_properties(ixgbe_t *);
static int ixgbe_init_driver_settings(ixgbe_t *);
static void ixgbe_init_locks(ixgbe_t *);
static void ixgbe_destroy_locks(ixgbe_t *);
static int ixgbe_init(ixgbe_t *);
static int ixgbe_chip_start(ixgbe_t *);
static void ixgbe_chip_stop(ixgbe_t *);
static int ixgbe_reset(ixgbe_t *);
static void ixgbe_tx_clean(ixgbe_t *);
static bool ixgbe_tx_drain(ixgbe_t *);
static bool ixgbe_rx_drain(ixgbe_t *);
static int ixgbe_alloc_rings(ixgbe_t *);
static void ixgbe_free_rings(ixgbe_t *);
static int ixgbe_alloc_rx_data(ixgbe_t *);
static void ixgbe_free_rx_data(ixgbe_t *);
static void ixgbe_setup_rings(ixgbe_t *);
static void ixgbe_setup_rx(ixgbe_t *);
static void ixgbe_setup_tx(ixgbe_t *);
static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
static void ixgbe_setup_rss(ixgbe_t *);
static void ixgbe_setup_vmdq(ixgbe_t *);
static void ixgbe_setup_vmdq_rss(ixgbe_t *);
static void ixgbe_setup_rss_table(ixgbe_t *);
static void ixgbe_init_unicst(ixgbe_t *);
static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
static void ixgbe_setup_multicst(ixgbe_t *);
static void ixgbe_get_hw_state(ixgbe_t *);
static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
static void ixgbe_get_conf(ixgbe_t *);
static void ixgbe_init_params(ixgbe_t *);
static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
static void ixgbe_driver_link_check(ixgbe_t *);
static void ixgbe_sfp_check(void *);
static void ixgbe_overtemp_check(void *);
static void ixgbe_phy_check(void *);
static void ixgbe_link_timer(void *);
static void ixgbe_local_timer(void *);
static void ixgbe_arm_watchdog_timer(ixgbe_t *);
static void ixgbe_restart_watchdog_timer(ixgbe_t *);
static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
static bool is_valid_mac_addr(uint8_t *);
static bool ixgbe_stall_check(ixgbe_t *);
static bool ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
static bool ixgbe_find_mac_address(ixgbe_t *);
static int ixgbe_alloc_intrs(ixgbe_t *);
static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
static int ixgbe_add_intr_handlers(ixgbe_t *);
static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
static void ixgbe_setup_adapter_vector(ixgbe_t *);
static void ixgbe_rem_intr_handlers(ixgbe_t *);
static void ixgbe_rem_intrs(ixgbe_t *);
static int ixgbe_enable_intrs(ixgbe_t *);
static int ixgbe_disable_intrs(ixgbe_t *);
static uint_t ixgbe_intr_legacy(void *, void *);
static uint_t ixgbe_intr_msi(void *, void *);
static uint_t ixgbe_intr_msix(void *, void *);
static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
static void ixgbe_get_driver_control(struct ixgbe_hw *);
static int ixgbe_addmac(void *, const uint8_t *);
static int ixgbe_remmac(void *, const uint8_t *);
static void ixgbe_release_driver_control(struct ixgbe_hw *);

static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
static int ixgbe_resume(dev_info_t *);
static int ixgbe_suspend(dev_info_t *);
static int ixgbe_quiesce(dev_info_t *);
static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
static int ixgbe_intr_cb_register(ixgbe_t *);
static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);

static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);
static void ixgbe_fm_init(ixgbe_t *);
static void ixgbe_fm_fini(ixgbe_t *);
char *ixgbe_priv_props[] = {
	"_tx_copy_thresh",
	"_tx_recycle_thresh",
	"_tx_overload_thresh",
	"_tx_resched_thresh",
	"_rx_copy_thresh",
	"_rx_limit_per_intr",
	"_intr_throttling",
	"_adv_pause_cap",
	"_adv_asym_pause_cap",
	NULL
};

#define	IXGBE_MAX_PRIV_PROPS \
	(sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))

static struct cb_ops ixgbe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

static struct dev_ops ixgbe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ixgbe_attach,		/* devo_attach */
	ixgbe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ixgbe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ixgbe_quiesce,		/* devo_quiesce */
};

static struct modldrv ixgbe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	ixgbe_ident,		/* Description string */
	&ixgbe_dev_ops		/* driver ops */
};

static struct modlinkage ixgbe_modlinkage = {
	MODREV_1, &ixgbe_modldrv, NULL
};

/*
 * Access attributes for register mapping
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * Loopback property
 */
static lb_property_t lb_normal = {
	normal, "normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
	internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

static lb_property_t lb_external = {
	external, "External", IXGBE_LB_EXTERNAL
};

#define	IXGBE_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

static mac_callbacks_t ixgbe_m_callbacks = {
	IXGBE_M_CALLBACK_FLAGS,
	ixgbe_m_stat,
	ixgbe_m_start,
	ixgbe_m_stop,
	ixgbe_m_promisc,
	ixgbe_m_multicst,
	NULL,
	NULL,
	NULL,
	ixgbe_m_ioctl,
	ixgbe_m_getcapab,
	NULL,
	NULL,
	ixgbe_m_setprop,
	ixgbe_m_getprop,
	ixgbe_m_propinfo
};

/*
 * Initialize capabilities of each supported adapter type
 */
static adapter_info_t ixgbe_82598eb_cap = {
	64,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	64,		/* default number of rx queues */
	16,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	32,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	16366,		/* maximum MTU size */
	0xFFFF,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	18,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	0,		/* "other" interrupt types enable mask */
	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE)
};

static adapter_info_t ixgbe_82599eb_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP1
	| IXGBE_EICR_GPI_SDP2),		/* "other" interrupt types handled */

	(IXGBE_SDP1_GPIEN
	| IXGBE_SDP2_GPIEN),		/* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE
	| IXGBE_FLAG_SFP_PLUG_CAPABLE)	/* capability flags */
};

static adapter_info_t ixgbe_X540_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP1_X540
	| IXGBE_EICR_GPI_SDP2_X540),	/* "other" interrupt types handled */

	(IXGBE_SDP1_GPIEN_X540
	| IXGBE_SDP2_GPIEN_X540),	/* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE)	/* capability flags */
};

static adapter_info_t ixgbe_X550_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	0x200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	0,		/* "other" interrupt types enable mask */
	(IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE)	/* capability flags */
};
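
/*
 * ixgbe_identify_hardware() selects one of the tables above based on
 * the detected MAC type and hangs it off ixgbe->capab; it may also
 * adjust individual flags per device ID (e.g. fan failure on copper
 * 82598, or SFP support on the X550EM SFP variant).
 */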

/*
 * Module Initialization Functions.
 */

int
_init(void)
{
	int status;

	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);

	status = mod_install(&ixgbe_modlinkage);

	if (status != DDI_SUCCESS) {
		mac_fini_ops(&ixgbe_dev_ops);
	}

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&ixgbe_modlinkage);

	if (status == DDI_SUCCESS) {
		mac_fini_ops(&ixgbe_dev_ops);
	}

	return (status);
}

int
_info(struct modinfo *modinfop)
{
	int status;

	status = mod_info(&ixgbe_modlinkage, modinfop);

	return (status);
}

/*
 * ixgbe_attach - Driver attach.
 *
 * This function is the device specific initialization entry
 * point. This entry point is required and must be written.
 * The DDI_ATTACH command must be provided in the attach entry
 * point. When attach() is called with cmd set to DDI_ATTACH,
 * all normal kernel services (such as kmem_alloc(9F)) are
 * available for use by the driver.
 *
 * The attach() function will be called once for each instance
 * of the device on the system with cmd set to DDI_ATTACH.
 * Until attach() succeeds, the only driver entry points which
 * may be called are open(9E) and getinfo(9E).
 */
static int
ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	ixgbe_t *ixgbe;
	struct ixgbe_osdep *osdep;
	struct ixgbe_hw *hw;
	int instance;
	char taskqname[32];

	/*
	 * Check the command and perform corresponding operations
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (ixgbe_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	/* Get the device instance */
	instance = ddi_get_instance(devinfo);

	/* Allocate memory for the instance data structure */
	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);

	ixgbe->dip = devinfo;
	ixgbe->instance = instance;

	hw = &ixgbe->hw;
	osdep = &ixgbe->osdep;
	hw->back = osdep;
	osdep->ixgbe = ixgbe;

	/* Attach the instance pointer to the dev_info data structure */
	ddi_set_driver_private(devinfo, ixgbe);

	/*
	 * Initialize for FMA support
	 */
	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
	ixgbe_fm_init(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
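
	/*
	 * Each ATTACH_PROGRESS_* bit records a completed attach step;
	 * ixgbe_unconfigure() keys off these bits to tear down exactly
	 * what was set up when attach fails midway or on detach.
	 */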

	/*
	 * Map PCI config space registers
	 */
	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map PCI configurations");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;

	/*
	 * Identify the chipset family
	 */
	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to identify hardware");
		goto attach_fail;
	}

	/*
	 * Map device registers
	 */
	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map device registers");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;

	/*
	 * Initialize driver parameters
	 */
	ixgbe_init_properties(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;

	/*
	 * Register interrupt callback
	 */
	if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to register interrupt callback");
		goto attach_fail;
	}

	/*
	 * Allocate interrupts
	 */
	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to allocate interrupts");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;

	/*
	 * Allocate rx/tx rings based on the ring numbers.
	 * The actual numbers of rx/tx rings are decided by the number of
	 * allocated interrupt vectors, so we should allocate the rings after
	 * interrupts are allocated.
	 */
	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;

	/*
	 * Map rings to interrupt vectors
	 */
	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
		goto attach_fail;
	}

	/*
	 * Add interrupt handlers
	 */
	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;

	/*
	 * Create a taskq for sfp-change
	 */
	(void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
	if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		ixgbe_error(ixgbe, "sfp_taskq create failed");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;

	/*
	 * Create a taskq for over-temp
	 */
	(void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
	if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		ixgbe_error(ixgbe, "overtemp_taskq create failed");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;

	/*
	 * Create a taskq for processing external PHY interrupts
	 */
	(void) sprintf(taskqname, "ixgbe%d_phy_taskq", instance);
	if ((ixgbe->phy_taskq = ddi_taskq_create(devinfo, taskqname,
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		ixgbe_error(ixgbe, "phy_taskq create failed");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_PHY_TASKQ;

	/*
	 * Initialize driver parameters
	 */
	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize driver settings");
		goto attach_fail;
	}

	/*
	 * Initialize mutexes for this device.
	 * Do this before enabling the interrupt handler and registering
	 * the softint, to avoid a window where the interrupt handler
	 * could use an uninitialized mutex.
	 */
	ixgbe_init_locks(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;

	/*
	 * Initialize chipset hardware
	 */
	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize adapter");
		goto attach_fail;
	}
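	/*
	 * gethrtime() returns nanoseconds, so the deadline set below is
	 * IXGBE_LINK_UP_TIME * 100ms from now.
	 */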
	ixgbe->link_check_complete = B_FALSE;
	ixgbe->link_check_hrtime = gethrtime() +
	    (IXGBE_LINK_UP_TIME * 100000000ULL);
	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;

	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	/*
	 * Initialize adapter capabilities
	 */
	ixgbe_init_params(ixgbe);

	/*
	 * Initialize statistics
	 */
	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize statistics");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;

	/*
	 * Register the driver to the MAC
	 */
	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to register MAC");
		goto attach_fail;
	}
	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;

	ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
	    IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
	if (ixgbe->periodic_id == 0) {
		ixgbe_error(ixgbe, "Failed to add the link check timer");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;

	/*
	 * Now that mutex locks are initialized, and the chip is also
	 * initialized, enable interrupts.
	 */
	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;

	ixgbe_log(ixgbe, "%s", ixgbe_ident);
	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);

	return (DDI_SUCCESS);

attach_fail:
	ixgbe_unconfigure(devinfo, ixgbe);
	return (DDI_FAILURE);
}

/*
 * ixgbe_detach - Driver detach.
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * Interrupt routines are disabled; all memory allocated by this
 * driver is freed.
 */
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	ixgbe_t *ixgbe;

	/*
	 * Check detach command
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (ixgbe_suspend(devinfo));

	case DDI_DETACH:
		break;
	}

	/*
	 * Get the pointer to the driver private data structure
	 */
	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	/*
	 * If the device is still running, it needs to be stopped first.
	 * This check is necessary because under some specific circumstances,
	 * the detach routine can be called without stopping the interface
	 * first.
	 */
	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_stop(ixgbe, B_TRUE);
		mutex_exit(&ixgbe->gen_lock);
		/* Disable and stop the watchdog timer */
		ixgbe_disable_watchdog_timer(ixgbe);
	}

	/*
	 * Check if there are still rx buffers held by the upper layer.
	 * If so, fail the detach.
	 */
	if (!ixgbe_rx_drain(ixgbe))
		return (DDI_FAILURE);

	/*
	 * Do the remaining unconfigure routines
	 */
	ixgbe_unconfigure(devinfo, ixgbe);

	return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
ixgbe_quiesce(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;
	struct ixgbe_hw *hw;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);

	if (ixgbe == NULL)
		return (DDI_FAILURE);

	hw = &ixgbe->hw;

	/*
	 * Disable the adapter interrupts
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Tell firmware driver is no longer in control
	 */
	ixgbe_release_driver_control(hw);

	/*
	 * Reset the chipset
	 */
	(void) ixgbe_reset_hw(hw);

	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);

	return (DDI_SUCCESS);
}

static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
	/*
	 * Disable interrupt
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		(void) ixgbe_disable_intrs(ixgbe);
	}

	/*
	 * remove the link check timer
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
		if (ixgbe->periodic_id != NULL) {
			ddi_periodic_delete(ixgbe->periodic_id);
			ixgbe->periodic_id = NULL;
		}
	}

	/*
	 * Unregister MAC
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
		(void) mac_unregister(ixgbe->mac_hdl);
	}

	/*
	 * Free statistics
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
	}

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}

	/*
	 * Remove taskq for sfp-status-change
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
		ddi_taskq_destroy(ixgbe->sfp_taskq);
	}

	/*
	 * Remove taskq for over-temp
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
		ddi_taskq_destroy(ixgbe->overtemp_taskq);
	}

	/*
	 * Remove taskq for external PHYs
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PHY_TASKQ) {
		ddi_taskq_destroy(ixgbe->phy_taskq);
	}

	/*
	 * Remove interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
		ixgbe_rem_intrs(ixgbe);
	}

	/*
	 * Unregister interrupt callback handler
	 */
	(void) ddi_cb_unregister(ixgbe->cb_hdl);

	/*
	 * Remove driver properties
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
		(void) ddi_prop_remove_all(devinfo);
	}

	/*
	 * Stop the chipset
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_chip_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
	}

	/*
	 * Free register handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
		if (ixgbe->osdep.reg_handle != NULL)
			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
	}

	/*
	 * Free PCI config handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
		if (ixgbe->osdep.cfg_handle != NULL)
			pci_config_teardown(&ixgbe->osdep.cfg_handle);
	}

	/*
	 * Free locks
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
		ixgbe_destroy_locks(ixgbe);
	}

	/*
	 * Free the rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
		ixgbe_free_rings(ixgbe);
	}

	/*
	 * Unregister FMA capabilities
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
		ixgbe_fm_fini(ixgbe);
	}

	/*
	 * Free the driver data structure
	 */
	kmem_free(ixgbe, sizeof (ixgbe_t));

	ddi_set_driver_private(devinfo, NULL);
}

/*
 * ixgbe_register_mac - Register the driver and its function pointers with
 * the GLD interface.
 */
static int
ixgbe_register_mac(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	mac_register_t *mac;
	int status;

	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
		return (IXGBE_FAILURE);

	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	mac->m_driver = ixgbe;
	mac->m_dip = ixgbe->dip;
	mac->m_src_addr = hw->mac.addr;
	mac->m_callbacks = &ixgbe_m_callbacks;
	mac->m_min_sdu = 0;
	mac->m_max_sdu = ixgbe->default_mtu;
	mac->m_margin = VLAN_TAGSZ;
	mac->m_priv_props = ixgbe_priv_props;
	mac->m_v12n = MAC_VIRT_LEVEL1;

	status = mac_register(mac, &ixgbe->mac_hdl);

	mac_free(mac);

	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
}

/*
 * ixgbe_identify_hardware - Identify the type of the chipset.
 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;

	/*
	 * Get the device id
	 */
	hw->vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
	hw->device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
	hw->revision_id =
	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

	/*
	 * Set the mac type of the adapter based on the device id
	 */
	if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Install adapter capabilities
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
		ixgbe->capab = &ixgbe_82598eb_cap;

		if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
			ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
			ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
		}
		break;

	case ixgbe_mac_82599EB:
		IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
		ixgbe->capab = &ixgbe_82599eb_cap;

		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
			ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
			ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
		}
		break;

	case ixgbe_mac_X540:
		IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
		ixgbe->capab = &ixgbe_X540_cap;
		/*
		 * For now, X540 is all set in its capab structure.
		 * As other X540 variants show up, things can change here.
		 */
		break;

	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		IXGBE_DEBUGLOG_0(ixgbe, "identify X550 adapter\n");
		ixgbe->capab = &ixgbe_X550_cap;

		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
			ixgbe->capab->flags |= IXGBE_FLAG_SFP_PLUG_CAPABLE;

		/*
		 * Link detection on X552 SFP+ and X552/X557-AT
		 */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
			ixgbe->capab->other_intr |=
			    IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
			ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN_X540;
		}
		break;

	default:
		IXGBE_DEBUGLOG_1(ixgbe,
		    "adapter not supported in ixgbe_identify_hardware(): %d\n",
		    hw->mac.type);
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_regs_map - Map the device registers.
 */
static int
ixgbe_regs_map(ixgbe_t *ixgbe)
{
	dev_info_t *devinfo = ixgbe->dip;
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;
	off_t mem_size;

	/*
	 * First get the size of device registers to be mapped.
	 */
	if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
	    != DDI_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Call ddi_regs_map_setup() to map registers
	 */
	if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
	    (caddr_t *)&hw->hw_addr, 0,
	    mem_size, &ixgbe_regs_acc_attr,
	    &osdep->reg_handle)) != DDI_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_properties - Initialize driver properties.
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
	/*
	 * Get conf file properties, including link settings,
	 * jumbo frames, ring number, descriptor number, etc.
	 */
	ixgbe_get_conf(ixgbe);
}

/*
 * ixgbe_init_driver_settings - Initialize driver settings.
 *
 * The settings include hardware function pointers, bus information,
 * rx/tx rings settings, link state, and any other parameters that
 * need to be setup during driver initialization.
 */
static int
ixgbe_init_driver_settings(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	dev_info_t *devinfo = ixgbe->dip;
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_group_t *rx_group;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t rx_size;
	uint32_t tx_size;
	uint32_t ring_per_group;
	int i;

	/*
	 * Initialize chipset specific hardware function pointers
	 */
	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Get the system page size
	 */
	ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);

	/*
	 * Set rx buffer size
	 *
	 * The IP header alignment room is counted in the calculation.
	 * The rx buffer size is in unit of 1K that is required by the
	 * chipset hardware.
	 */
	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
	ixgbe->rx_buf_size = ((rx_size >> 10) +
	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

	/*
	 * Set tx buffer size
	 */
	tx_size = ixgbe->max_frame_size;
	ixgbe->tx_buf_size = ((tx_size >> 10) +
	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
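	/*
	 * Both expressions above round the frame size up to the next
	 * multiple of 1 KB: the low 10 bits hold the remainder, and any
	 * nonzero remainder adds one more 1 KB unit.
	 */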

	/*
	 * Initialize rx/tx rings/groups parameters
	 */
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
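	/*
	 * Rx rings are spread evenly across the groups: with
	 * ring_per_group rings in each group, ring i belongs to group
	 * i / ring_per_group.
	 */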
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		rx_ring->index = i;
		rx_ring->ixgbe = ixgbe;
		rx_ring->group_index = i / ring_per_group;
		rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
	}

	for (i = 0; i < ixgbe->num_rx_groups; i++) {
		rx_group = &ixgbe->rx_groups[i];
		rx_group->index = i;
		rx_group->ixgbe = ixgbe;
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		tx_ring->index = i;
		tx_ring->ixgbe = ixgbe;
		if (ixgbe->tx_head_wb_enable)
			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
		else
			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;

		tx_ring->ring_size = ixgbe->tx_ring_size;
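		/*
		 * The free list is sized at 1.5x the ring size
		 * (ring_size + ring_size/2); the extra half provides
		 * slack for control blocks still waiting to be recycled.
		 */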
		tx_ring->free_list_size = ixgbe->tx_ring_size +
		    (ixgbe->tx_ring_size >> 1);
	}

	/*
	 * Initialize values of interrupt throttling rate
	 */
	for (i = 1; i < MAX_INTR_VECTOR; i++)
		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
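
	/*
	 * Vector 0 holds the configured throttle rate; the copies made
	 * above give every vector the same initial value, and they are
	 * programmed into the per-vector EITR registers in
	 * ixgbe_chip_start().
	 */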

	/*
	 * The initial link state should be "unknown"
	 */
	ixgbe->link_state = LINK_STATE_UNKNOWN;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_locks - Initialize locks.
 */
static void
ixgbe_init_locks(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		mutex_init(&rx_ring->rx_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		mutex_init(&tx_ring->tx_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->recycle_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->tcb_head_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->tcb_tail_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
	}

	mutex_init(&ixgbe->gen_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));

	mutex_init(&ixgbe->watchdog_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
}

/*
 * ixgbe_destroy_locks - Destroy locks.
 */
static void
ixgbe_destroy_locks(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		mutex_destroy(&rx_ring->rx_lock);
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		mutex_destroy(&tx_ring->tx_lock);
		mutex_destroy(&tx_ring->recycle_lock);
		mutex_destroy(&tx_ring->tcb_head_lock);
		mutex_destroy(&tx_ring->tcb_tail_lock);
	}

	mutex_destroy(&ixgbe->gen_lock);
	mutex_destroy(&ixgbe->watchdog_lock);
}

/*
 * We need to try and determine which LED index in hardware corresponds to the
 * link/activity LED. This is the one that'll be overwritten when we perform
 * GLDv3 LED activity.
 */
static void
ixgbe_led_init(ixgbe_t *ixgbe)
{
	uint32_t reg, i;
	struct ixgbe_hw *hw = &ixgbe->hw;

	reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
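	/*
	 * LEDCTL carries a mode field for each of the four LEDs; scan
	 * them for the one configured as link/activity.
	 */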
	for (i = 0; i < 4; i++) {
		if (((reg >> IXGBE_LED_MODE_SHIFT(i)) &
		    IXGBE_LED_MODE_MASK_BASE) == IXGBE_LED_LINK_ACTIVE) {
			ixgbe->ixgbe_led_index = i;
			return;
		}
	}

	/*
	 * If we couldn't determine this, we use the default for various MACs
	 * based on information Intel has inserted into other drivers over the
	 * years. Note, when we have support for the X553 which should add the
	 * ixgbe_x550_em_a mac type, that should be at index 0.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
		ixgbe->ixgbe_led_index = 1;
		break;
	default:
		ixgbe->ixgbe_led_index = 2;
		break;
	}
}

static int
ixgbe_resume(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;
	int i;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
			mutex_exit(&ixgbe->gen_lock);
			return (DDI_FAILURE);
		}

		/*
		 * Enable and start the watchdog timer
		 */
		ixgbe_enable_watchdog_timer(ixgbe);
	}

	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		for (i = 0; i < ixgbe->num_tx_rings; i++) {
			mac_tx_ring_update(ixgbe->mac_hdl,
			    ixgbe->tx_rings[i].ring_handle);
		}
	}

	mutex_exit(&ixgbe->gen_lock);

	return (DDI_SUCCESS);
}

static int
ixgbe_suspend(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
		mutex_exit(&ixgbe->gen_lock);
		return (DDI_SUCCESS);
	}
	ixgbe_stop(ixgbe, B_FALSE);

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);

	return (DDI_SUCCESS);
}

/*
 * ixgbe_init - Initialize the device.
 */
static int
ixgbe_init(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u8 pbanum[IXGBE_PBANUM_LENGTH];
	int rv;

	mutex_enter(&ixgbe->gen_lock);

	/*
	 * Configure/Initialize hardware
	 */
	rv = ixgbe_init_hw(hw);
	if (rv != IXGBE_SUCCESS) {
		switch (rv) {

		/*
		 * The first three errors are not prohibitive to us
		 * progressing further, and are mainly advisory in nature.
		 * In the case of a SFP module not being present or not
		 * deemed supported by the common code, we advise the
		 * operator of this fact but carry on instead of failing
		 * hard, as SFPs can be inserted or replaced while the
		 * driver is running. In the case of an unknown error, we
		 * fail hard, logging the reason and emitting a FMA event.
		 */
		case IXGBE_ERR_EEPROM_VERSION:
			ixgbe_error(ixgbe,
			    "This Intel 10Gb Ethernet device is pre-release and"
			    " contains outdated firmware. Please contact your"
			    " hardware vendor for a replacement.");
			break;
		case IXGBE_ERR_SFP_NOT_PRESENT:
			ixgbe_error(ixgbe,
			    "No SFP+ module detected on this interface. Please "
			    "install a supported SFP+ module for this "
			    "interface to become operational.");
			break;
		case IXGBE_ERR_SFP_NOT_SUPPORTED:
			ixgbe_error(ixgbe,
			    "Unsupported SFP+ module detected. Please replace "
			    "it with a supported SFP+ module per Intel "
			    "documentation, or bypass this check with "
			    "allow_unsupported_sfp=1 in ixgbe.conf.");
			break;
		default:
			ixgbe_error(ixgbe,
			    "Failed to initialize hardware. ixgbe_init_hw "
			    "returned %d", rv);
			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
			goto init_fail;
		}
	}

	/*
	 * Need to init eeprom before validating the checksum.
	 */
	if (ixgbe_init_eeprom_params(hw) < 0) {
		ixgbe_error(ixgbe,
		    "Unable to initialize the eeprom interface.");
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	/*
	 * NVM validation
	 */
	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state. Call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
			ixgbe_error(ixgbe,
			    "Invalid NVM checksum. Please contact "
			    "the vendor to update the NVM.");
			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
			goto init_fail;
		}
	}

	/*
	 * Setup default flow control thresholds - enable/disable
	 * & flow control type is controlled by ixgbe.conf
	 */
	hw->fc.high_water[0] = DEFAULT_FCRTH;
	hw->fc.low_water[0] = DEFAULT_FCRTL;
	hw->fc.pause_time = DEFAULT_FCPAUSE;
	hw->fc.send_xon = B_TRUE;

	/*
	 * Initialize flow control
	 */
	(void) ixgbe_start_hw(hw);

	/*
	 * Initialize link settings
	 */
	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);

	/*
	 * Initialize the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	/*
	 * Read identifying information and place in devinfo.
	 */
	pbanum[0] = '\0';
	(void) ixgbe_read_pba_string(hw, pbanum, sizeof (pbanum));
	if (*pbanum != '\0') {
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, ixgbe->dip,
		    "printed-board-assembly", (char *)pbanum);
	}

	/*
	 * Determine LED index.
	 */
	ixgbe_led_init(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto init_fail;
	}

	mutex_exit(&ixgbe->gen_lock);
	return (IXGBE_SUCCESS);

init_fail:
	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);

	mutex_exit(&ixgbe->gen_lock);
	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	return (IXGBE_FAILURE);
}

/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Get the mac address
	 * This function should handle SPARC case correctly.
	 */
	if (!ixgbe_find_mac_address(ixgbe)) {
		ixgbe_error(ixgbe, "Failed to get the mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Validate the mac address
	 */
	(void) ixgbe_init_rx_addrs(hw);
	if (!is_valid_mac_addr(hw->mac.addr)) {
		ixgbe_error(ixgbe, "Invalid mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Re-enable relaxed ordering for performance. It is disabled
	 * by default in the hardware init.
	 */
	if (ixgbe->relax_order_enable == B_TRUE)
		ixgbe_enable_relaxed_ordering(hw);

	/*
	 * Setup adapter interrupt vectors
	 */
	ixgbe_setup_adapter_vector(ixgbe);

	/*
	 * Initialize unicast addresses.
	 */
	ixgbe_init_unicst(ixgbe);

	/*
	 * Setup and initialize the mctable structures.
	 */
	ixgbe_setup_multicst(ixgbe);

	/*
	 * Set interrupt throttling rate
	 */
	for (i = 0; i < ixgbe->intr_cnt; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
	}

	/*
	 * Disable Wake-on-LAN
	 */
	IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);

	/*
	 * Some adapters offer Energy Efficient Ethernet (EEE) support.
	 * Due to issues with EEE in e1000g/igb, we disable this by default
	 * as a precautionary measure.
	 *
	 * Currently, the only known adapter which supports EEE in the ixgbe
	 * line is 8086,15AB (IXGBE_DEV_ID_X550EM_X_KR), and only after the
	 * first revision of it, as well as any X550 with MAC type 6 (non-EM)
	 */
	(void) ixgbe_setup_eee(hw, B_FALSE);

	/*
	 * Turn on any present SFP Tx laser
	 */
	ixgbe_enable_tx_laser(hw);

	/*
	 * Power on the PHY
	 */
	(void) ixgbe_set_phy_power(hw, B_TRUE);

	/*
	 * Save the state of the PHY
	 */
	ixgbe_get_hw_state(ixgbe);

	/*
	 * Make sure driver has control
	 */
	ixgbe_get_driver_control(hw);

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_chip_stop - Stop the chipset hardware
 */
static void
ixgbe_chip_stop(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int rv;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Stop interrupt generation and disable Tx unit
	 */
	hw->adapter_stopped = B_FALSE;
	(void) ixgbe_stop_adapter(hw);

	/*
	 * Reset the chipset
	 */
	(void) ixgbe_reset_hw(hw);

	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);

	/*
	 * Enter LPLU (Low Power, Link Up) mode, if available. Avoid resetting
	 * the PHY while doing so. Else, just power down the PHY.
	 */
	if (hw->phy.ops.enter_lplu != NULL) {
		hw->phy.reset_disable = B_TRUE;
		rv = hw->phy.ops.enter_lplu(hw);
		if (rv != IXGBE_SUCCESS)
			ixgbe_error(ixgbe, "Error while entering LPLU: %d", rv);
		hw->phy.reset_disable = B_FALSE;
	} else {
		(void) ixgbe_set_phy_power(hw, B_FALSE);
	}

	/*
	 * Turn off any present SFP Tx laser
	 * Expected for health and safety reasons
	 */
	ixgbe_disable_tx_laser(hw);

	/*
	 * Tell firmware driver is no longer in control
	 */
	ixgbe_release_driver_control(hw);
}

/*
 * ixgbe_reset - Reset the chipset and re-start the driver.
 *
 * It involves stopping and re-starting the chipset,
 * and re-configuring the rx/tx rings.
 */
static int
ixgbe_reset(ixgbe_t *ixgbe)
{
	int i;

	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);

	mutex_enter(&ixgbe->gen_lock);

	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);

	ixgbe_stop(ixgbe, B_FALSE);

	if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
		mutex_exit(&ixgbe->gen_lock);
		return (IXGBE_FAILURE);
	}

	/*
	 * After resetting, need to recheck the link status.
	 */
	ixgbe->link_check_complete = B_FALSE;
	ixgbe->link_check_hrtime = gethrtime() +
	    (IXGBE_LINK_UP_TIME * 100000000ULL);

	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);

	if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
		for (i = 0; i < ixgbe->num_tx_rings; i++) {
			mac_tx_ring_update(ixgbe->mac_hdl,
			    ixgbe->tx_rings[i].ring_handle);
		}
	}

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Enable and start the watchdog timer
	 */
	ixgbe_enable_watchdog_timer(ixgbe);

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
 */
static void
ixgbe_tx_clean(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	tx_control_block_t *tcb;
	link_list_t pending_list;
	uint32_t desc_num;
	int i, j;

	LINK_LIST_INIT(&pending_list);

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];

		mutex_enter(&tx_ring->recycle_lock);

		/*
		 * Clean the pending tx data - the pending packets in the
		 * work_list that have no chances to be transmitted again.
		 *
		 * We must ensure the chipset is stopped or the link is down
		 * before cleaning the transmit packets.
		 */
		desc_num = 0;
		for (j = 0; j < tx_ring->ring_size; j++) {
			tcb = tx_ring->work_list[j];
			if (tcb != NULL) {
				desc_num += tcb->desc_num;

				tx_ring->work_list[j] = NULL;

				ixgbe_free_tcb(tcb);

				LIST_PUSH_TAIL(&pending_list, &tcb->link);
			}
		}

		if (desc_num > 0) {
			atomic_add_32(&tx_ring->tbd_free, desc_num);
			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

			/*
			 * Reset the head and tail pointers of the tbd ring;
			 * Reset the writeback head if it's enabled.
			 */
			tx_ring->tbd_head = 0;
			tx_ring->tbd_tail = 0;
			if (ixgbe->tx_head_wb_enable)
				*tx_ring->tbd_head_wb = 0;

			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDH(tx_ring->index), 0);
			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDT(tx_ring->index), 0);
		}

		mutex_exit(&tx_ring->recycle_lock);

		/*
		 * Add the tx control blocks in the pending list to
		 * the free list.
		 */
		ixgbe_put_free_list(tx_ring, &pending_list);
	}
}

/*
 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
 * transmitted.
 */
static bool
ixgbe_tx_drain(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	bool done;
	int i, j;

	/*
	 * Wait for a specific time to allow pending tx packets
	 * to be transmitted.
	 *
	 * Check the counter tbd_free to see if transmission is done.
	 * No lock protection is needed here.
	 *
	 * Return B_TRUE if all pending packets have been transmitted;
	 * Otherwise return B_FALSE;
	 */
	for (i = 0; i < TX_DRAIN_TIME; i++) {

		done = B_TRUE;
		for (j = 0; j < ixgbe->num_tx_rings; j++) {
			tx_ring = &ixgbe->tx_rings[j];
			done = done &&
			    (tx_ring->tbd_free == tx_ring->ring_size);
		}

		if (done)
			break;

		msec_delay(1);
	}

	return (done);
}

/*
 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
 */
static bool
ixgbe_rx_drain(ixgbe_t *ixgbe)
{
	bool done = B_TRUE;
	int i;

	/*
	 * Polling the rx free list to check if those rx buffers held by
	 * the upper layer are released.
	 *
	 * Check the counter rcb_free to see if all pending buffers are
	 * released. No lock protection is needed here.
	 *
	 * Return B_TRUE if all pending buffers have been released;
	 * Otherwise return B_FALSE;
	 */
	for (i = 0; i < RX_DRAIN_TIME; i++) {
		done = (ixgbe->rcb_pending == 0);

		if (done)
			break;

		msec_delay(1);
	}

	return (done);
}

/*
 * ixgbe_start - Start the driver/chipset.
 */
int
ixgbe_start(ixgbe_t *ixgbe, bool alloc_buffer)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	if (alloc_buffer) {
		if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe,
			    "Failed to allocate software receive rings");
			return (IXGBE_FAILURE);
		}

		/* Allocate buffers for all the rx/tx rings */
		if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Failed to allocate DMA resource");
			return (IXGBE_FAILURE);
		}

		ixgbe->tx_ring_init = B_TRUE;
	} else {
		ixgbe->tx_ring_init = B_FALSE;
	}

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
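
	/*
	 * Ring locks are taken rx rings first and then tx rings, and are
	 * dropped in the reverse order below; ixgbe_stop() follows the
	 * same ordering.
	 */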

	/*
	 * Start the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto start_failure;
	}

	/*
	 * Configure link now for X550
	 *
	 * X550 possesses a LPLU (Low-Power Link Up) mode which keeps the
	 * resting state of the adapter at a 1Gb FDX speed. Prior to the X550,
	 * the resting state of the link would be the maximum speed that
	 * autonegotiation will allow (usually 10Gb, infrastructure allowing)
	 * so we never bothered with explicitly setting the link to 10Gb as it
	 * would already be at that state on driver attach. With X550, we must
	 * trigger a re-negotiation of the link in order to switch from a LPLU
	 * 1Gb link to 10Gb (cable and link partner permitting.)
	 */
	if (hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x) {
		(void) ixgbe_driver_setup_link(ixgbe, B_TRUE);
		ixgbe_get_hw_state(ixgbe);
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto start_failure;
	}

	/*
	 * Setup the rx/tx rings
	 */
	ixgbe_setup_rings(ixgbe);

	/*
	 * ixgbe_start() will be called when resetting, however if reset
	 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
	 * before enabling the interrupts.
	 */
	atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
	    | IXGBE_STALL | IXGBE_OVERTEMP));

	/*
	 * Enable adapter interrupts
	 * The interrupts must be enabled after the driver state is START
	 */
	ixgbe_enable_adapter_interrupts(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	return (IXGBE_SUCCESS);

start_failure:
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_stop - Stop the driver/chipset.
 */
void
ixgbe_stop(ixgbe_t *ixgbe, bool free_buffer)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Disable the adapter interrupts
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Drain the pending tx packets
	 */
	(void) ixgbe_tx_drain(ixgbe);

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Stop the chipset hardware
	 */
	ixgbe_chip_stop(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clean the pending tx data/resources
	 */
	ixgbe_tx_clean(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	if (ixgbe->link_state == LINK_STATE_UP) {
		ixgbe->link_state = LINK_STATE_UNKNOWN;
		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
	}

	if (free_buffer) {
		/*
		 * Release the DMA/memory resources of rx/tx rings
		 */
		ixgbe_free_dma(ixgbe);
		ixgbe_free_rx_data(ixgbe);
	}
}

/*
 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
 */
/* ARGSUSED */
static int
ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
    void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;

	switch (cbaction) {
	/* IRM callback */
	int count;
	case DDI_CB_INTR_ADD:
	case DDI_CB_INTR_REMOVE:
		count = (int)(uintptr_t)cbarg;
		ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
		DTRACE_PROBE2(ixgbe__irm__callback, int, count,
		    int, ixgbe->intr_cnt);
		if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
		    DDI_SUCCESS) {
			ixgbe_error(ixgbe,
			    "IRM CB: Failed to adjust interrupts");
			goto cb_fail;
		}
		break;
	default:
		IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
		    cbaction);
		return (DDI_ENOTSUP);
	}
	return (DDI_SUCCESS);

cb_fail:
	return (DDI_FAILURE);
}
2028 * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request.
2030 static int
2031 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
2033 int i, rc, actual;
2035 if (count == 0)
2036 return (DDI_SUCCESS);
2038 if ((cbaction == DDI_CB_INTR_ADD &&
2039 ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
2040 (cbaction == DDI_CB_INTR_REMOVE &&
2041 ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
2042 return (DDI_FAILURE);
2044 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
2045 return (DDI_FAILURE);
2048 for (i = 0; i < ixgbe->num_rx_rings; i++)
2049 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
2050 for (i = 0; i < ixgbe->num_tx_rings; i++)
2051 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);
2053 mutex_enter(&ixgbe->gen_lock);
2054 ixgbe->ixgbe_state &= ~IXGBE_STARTED;
2055 ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
2056 ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
2057 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
2059 ixgbe_stop(ixgbe, B_FALSE);
2061 * Disable interrupts
2063 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
2064 rc = ixgbe_disable_intrs(ixgbe);
2065 ASSERT(rc == IXGBE_SUCCESS);
2067 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;
2070 * Remove interrupt handlers
2072 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
2073 ixgbe_rem_intr_handlers(ixgbe);
2075 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;
2078 * Clear vect_map
2080 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
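
	/*
	 * DDI_CB_INTR_ADD allocates the extra vectors starting at index
	 * intr_cnt of htable; DDI_CB_INTR_REMOVE frees the same number
	 * of vectors from the tail of htable.
	 */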
	switch (cbaction) {
	case DDI_CB_INTR_ADD:
		rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
		    DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
		    DDI_INTR_ALLOC_NORMAL);
		if (rc != DDI_SUCCESS || actual != count) {
			ixgbe_log(ixgbe, "Adjust interrupts failed."
			    "return: %d, irm cb size: %d, actual: %d",
			    rc, count, actual);
			goto intr_adjust_fail;
		}
		ixgbe->intr_cnt += count;
		break;

	case DDI_CB_INTR_REMOVE:
		for (i = ixgbe->intr_cnt - count;
		    i < ixgbe->intr_cnt; i++) {
			rc = ddi_intr_free(ixgbe->htable[i]);
			ixgbe->htable[i] = NULL;
			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe, "Adjust interrupts failed."
				    "return: %d, irm cb size: %d, actual: %d",
				    rc, count, actual);
				goto intr_adjust_fail;
			}
		}
		ixgbe->intr_cnt -= count;
		break;
	}

	/*
	 * Get priority for first vector, assume remaining are all the same
	 */
	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt priority failed: %d", rc);
		goto intr_adjust_fail;
	}

	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc);
		goto intr_adjust_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;

	/*
	 * Map rings to interrupt vectors
	 */
	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe,
		    "IRM CB: Failed to map interrupts to vectors");
		goto intr_adjust_fail;
	}

	/*
	 * Add interrupt handlers
	 */
	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers");
		goto intr_adjust_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;

	/*
	 * Now that mutex locks are initialized, and the chip is also
	 * initialized, enable interrupts.
	 */
	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts");
		goto intr_adjust_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;

	if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "IRM CB: Failed to start");
		goto intr_adjust_fail;
	}
	ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST;
	ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
	ixgbe->ixgbe_state |= IXGBE_STARTED;
	mutex_exit(&ixgbe->gen_lock);

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle,
		    ixgbe->htable[ixgbe->rx_rings[i].intr_vector]);
	}
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle,
		    ixgbe->htable[ixgbe->tx_rings[i].intr_vector]);
	}

	/* Wakeup all Tx rings */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		mac_tx_ring_update(ixgbe->mac_hdl,
		    ixgbe->tx_rings[i].ring_handle);
	}

	IXGBE_DEBUGLOG_3(ixgbe,
	    "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).",
	    ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max);
	return (DDI_SUCCESS);

intr_adjust_fail:
	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	mutex_exit(&ixgbe->gen_lock);
	return (DDI_FAILURE);
}
2190 * ixgbe_intr_cb_register - Register interrupt callback function.
2192 static int
2193 ixgbe_intr_cb_register(ixgbe_t *ixgbe)
2195 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc,
2196 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) {
2197 return (IXGBE_FAILURE);
2199 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered.");
2200 return (IXGBE_SUCCESS);
2204 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
2206 static int
2207 ixgbe_alloc_rings(ixgbe_t *ixgbe)
2210 * Allocate memory space for rx rings
2212 ixgbe->rx_rings = kmem_zalloc(
2213 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
2214 KM_NOSLEEP);
2216 if (ixgbe->rx_rings == NULL) {
2217 return (IXGBE_FAILURE);
2221 * Allocate memory space for tx rings
2223 ixgbe->tx_rings = kmem_zalloc(
2224 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
2225 KM_NOSLEEP);
2227 if (ixgbe->tx_rings == NULL) {
2228 kmem_free(ixgbe->rx_rings,
2229 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2230 ixgbe->rx_rings = NULL;
2231 return (IXGBE_FAILURE);
2235 * Allocate memory space for rx ring groups
2237 ixgbe->rx_groups = kmem_zalloc(
2238 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
2239 KM_NOSLEEP);
2241 if (ixgbe->rx_groups == NULL) {
2242 kmem_free(ixgbe->rx_rings,
2243 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2244 kmem_free(ixgbe->tx_rings,
2245 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
2246 ixgbe->rx_rings = NULL;
2247 ixgbe->tx_rings = NULL;
2248 return (IXGBE_FAILURE);
2251 return (IXGBE_SUCCESS);
2255 * ixgbe_free_rings - Free the memory space of rx/tx rings.
2257 static void
2258 ixgbe_free_rings(ixgbe_t *ixgbe)
2260 if (ixgbe->rx_rings != NULL) {
2261 kmem_free(ixgbe->rx_rings,
2262 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2263 ixgbe->rx_rings = NULL;
2266 if (ixgbe->tx_rings != NULL) {
2267 kmem_free(ixgbe->tx_rings,
2268 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
2269 ixgbe->tx_rings = NULL;
2272 if (ixgbe->rx_groups != NULL) {
2273 kmem_free(ixgbe->rx_groups,
2274 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
2275 ixgbe->rx_groups = NULL;
2279 static int
2280 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
2282 ixgbe_rx_ring_t *rx_ring;
2283 int i;
2285 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2286 rx_ring = &ixgbe->rx_rings[i];
2287 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
2288 goto alloc_rx_rings_failure;
2290 return (IXGBE_SUCCESS);
2292 alloc_rx_rings_failure:
2293 ixgbe_free_rx_data(ixgbe);
2294 return (IXGBE_FAILURE);
2297 static void
2298 ixgbe_free_rx_data(ixgbe_t *ixgbe)
2300 ixgbe_rx_ring_t *rx_ring;
2301 ixgbe_rx_data_t *rx_data;
2302 int i;
2304 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2305 rx_ring = &ixgbe->rx_rings[i];
2307 mutex_enter(&ixgbe->rx_pending_lock);
2308 rx_data = rx_ring->rx_data;
2310 if (rx_data != NULL) {
2311 rx_data->flag |= IXGBE_RX_STOPPED;
2313 if (rx_data->rcb_pending == 0) {
2314 ixgbe_free_rx_ring_data(rx_data);
2315 rx_ring->rx_data = NULL;
2319 mutex_exit(&ixgbe->rx_pending_lock);
2324 * ixgbe_setup_rings - Setup rx/tx rings.
2326 static void
2327 ixgbe_setup_rings(ixgbe_t *ixgbe)
2330 * Setup the rx/tx rings, including the following:
2332 * 1. Setup the descriptor ring and the control block buffers;
2333 * 2. Initialize necessary registers for receive/transmit;
2334 * 3. Initialize software pointers/parameters for receive/transmit;
2336 ixgbe_setup_rx(ixgbe);
2338 ixgbe_setup_tx(ixgbe);
2341 static void
2342 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
2344 ixgbe_t *ixgbe = rx_ring->ixgbe;
2345 ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
2346 struct ixgbe_hw *hw = &ixgbe->hw;
2347 rx_control_block_t *rcb;
2348 union ixgbe_adv_rx_desc *rbd;
2349 uint32_t size;
2350 uint32_t buf_low;
2351 uint32_t buf_high;
2352 uint32_t reg_val;
2353 int i;
2355 ASSERT(mutex_owned(&rx_ring->rx_lock));
2356 ASSERT(mutex_owned(&ixgbe->gen_lock));
2358 for (i = 0; i < ixgbe->rx_ring_size; i++) {
2359 rcb = rx_data->work_list[i];
2360 rbd = &rx_data->rbd_ring[i];
2362 rbd->read.pkt_addr = rcb->rx_buf.dma_address;
2363 rbd->read.hdr_addr = (uintptr_t)NULL;
2367 * Initialize the length register
2369 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
2370 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size);
2373 * Initialize the base address registers
2375 buf_low = (uint32_t)rx_data->rbd_area.dma_address;
2376 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
2377 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high);
2378 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low);
2381 * Setup head & tail pointers
2383 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
2384 rx_data->ring_size - 1);
2385 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
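/*
 * With RDH = 0 and RDT = ring_size - 1 the hardware initially owns
 * every descriptor except the one at the tail index; leaving that one
 * unposted is the usual way to make head == tail unambiguously mean
 * an empty ring.
 */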
2387 rx_data->rbd_next = 0;
2388 rx_data->lro_first = 0;
2391 * Setup the Receive Descriptor Control Register (RXDCTL)
2392 * PTHRESH=32 descriptors (half the internal cache)
2393 * HTHRESH=0 descriptors (to minimize latency on fetch)
2394 * WTHRESH defaults to 1 (writeback each descriptor)
2396 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2397 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2399 /* Not a valid value for 82599, X540 or X550 */
2400 if (hw->mac.type == ixgbe_mac_82598EB) {
2401 reg_val |= 0x0020; /* pthresh */
2403 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2405 if (hw->mac.type == ixgbe_mac_82599EB ||
2406 hw->mac.type == ixgbe_mac_X540 ||
2407 hw->mac.type == ixgbe_mac_X550 ||
2408 hw->mac.type == ixgbe_mac_X550EM_x) {
2409 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2410 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2411 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2415 * Setup the Split and Replication Receive Control Register.
2416 * Set the rx buffer size and the advanced descriptor type.
2418 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2419 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2420 reg_val |= IXGBE_SRRCTL_DROP_EN;
2421 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2424 static void
2425 ixgbe_setup_rx(ixgbe_t *ixgbe)
2427 ixgbe_rx_ring_t *rx_ring;
2428 struct ixgbe_hw *hw = &ixgbe->hw;
2429 uint32_t reg_val;
2430 uint32_t ring_mapping;
2431 uint32_t i, index;
2432 uint32_t psrtype_rss_bit;
2435 * Ensure that Rx is disabled while setting up
2436 * the Rx unit and Rx descriptor ring(s)
2438 ixgbe_disable_rx(hw);
2440 /* PSRTYPE must be configured for 82599 */
2441 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
2442 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
2443 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2444 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2445 reg_val |= IXGBE_PSRTYPE_L2HDR;
2446 reg_val |= 0x80000000;
2447 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
2448 } else {
2449 if (ixgbe->num_rx_groups > 32) {
2450 psrtype_rss_bit = 0x20000000;
2451 } else {
2452 psrtype_rss_bit = 0x40000000;
2454 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) {
2455 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2456 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2457 reg_val |= IXGBE_PSRTYPE_L2HDR;
2458 reg_val |= psrtype_rss_bit;
2459 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val);
2464 * Set filter control in FCTRL to determine which types of packets are
2465 * passed up to the driver.
2466 * - Pass broadcast packets.
2467 * - Do not pass flow control pause frames (82598-specific)
2469 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2470 reg_val |= IXGBE_FCTRL_BAM; /* Broadcast Accept Mode */
2471 if (hw->mac.type == ixgbe_mac_82598EB) {
2472 reg_val |= IXGBE_FCTRL_DPF; /* Discard Pause Frames */
2474 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
2477 * Hardware checksum settings
2479 if (ixgbe->rx_hcksum_enable) {
2480 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2481 reg_val |= IXGBE_RXCSUM_IPPCSE; /* IP checksum */
2482 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
2486 * Setup VMDq and RSS for multiple receive queues
2488 switch (ixgbe->classify_mode) {
2489 case IXGBE_CLASSIFY_RSS:
2491 * One group, only RSS is needed when more than
2492 * one ring enabled.
2494 ixgbe_setup_rss(ixgbe);
2495 break;
2497 case IXGBE_CLASSIFY_VMDQ:
2499 * Multiple groups, each group has one ring,
2500 * only VMDq is needed.
2502 ixgbe_setup_vmdq(ixgbe);
2503 break;
2505 case IXGBE_CLASSIFY_VMDQ_RSS:
2507 * Multiple groups and multiple rings, both
2508 * VMDq and RSS are needed.
2510 ixgbe_setup_vmdq_rss(ixgbe);
2511 break;
2513 default:
2514 break;
2518 * Enable the receive unit. This must be done after filter
2519 * control is set in FCTRL. On 82598, we disable the descriptor monitor.
2520 * 82598 is the only adapter which defines this RXCTRL option.
2522 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2523 if (hw->mac.type == ixgbe_mac_82598EB)
2524 reg_val |= IXGBE_RXCTRL_DMBYPS; /* descriptor monitor bypass */
2525 reg_val |= IXGBE_RXCTRL_RXEN;
2526 (void) ixgbe_enable_rx_dma(hw, reg_val);
2529 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
2531 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2532 rx_ring = &ixgbe->rx_rings[i];
2533 ixgbe_setup_rx_ring(rx_ring);
2537 * Setup the per-ring statistics mapping.
2539 ring_mapping = 0;
2540 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2541 index = ixgbe->rx_rings[i].hw_index;
2542 ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
2543 ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
2544 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
2548 * The Max Frame Size in MHADD/MAXFRS will be internally increased
2549 * by four bytes if the packet has a VLAN field, so it includes only the
2550 * MTU, ethernet header and frame check sequence.
2551 * The register is named MAXFRS on 82599.
2553 reg_val = IXGBE_READ_REG(hw, IXGBE_MHADD);
2554 reg_val &= ~IXGBE_MHADD_MFS_MASK;
2555 reg_val |= (ixgbe->default_mtu + sizeof (struct ether_header)
2556 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
2557 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
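/*
 * Worked example: with the default MTU of 1500 this programs
 * 1500 + 14 + 4 = 1518 bytes into MFS; VLAN-tagged frames get the
 * extra 4 bytes added internally, as noted above.
 */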
2560 * Setup Jumbo Frame enable bit
2562 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2563 if (ixgbe->default_mtu > ETHERMTU)
2564 reg_val |= IXGBE_HLREG0_JUMBOEN;
2565 else
2566 reg_val &= ~IXGBE_HLREG0_JUMBOEN;
2567 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2570 * Setup RSC for multiple receive queues.
2572 if (ixgbe->lro_enable) {
2573 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2575 * Make sure rx_buf_size * MAXDESC is not greater
2576 * than 65535.
2577 * Intel recommends 4 as the MAXDESC field value.
2579 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
2580 reg_val |= IXGBE_RSCCTL_RSCEN;
2581 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
2582 reg_val |= IXGBE_RSCCTL_MAXDESC_1;
2583 else
2584 reg_val |= IXGBE_RSCCTL_MAXDESC_4;
2585 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val);
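/*
 * The MAXDESC cap follows from the arithmetic above: with 16 KB
 * buffers, 4 descriptors could coalesce 4 * 16384 = 65536 bytes,
 * one more than the 65535 limit, so MAXDESC is reduced to 1.
 */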
2588 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
2589 reg_val |= IXGBE_RSCDBU_RSCACKDIS;
2590 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);
2592 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2593 reg_val |= IXGBE_RDRXCTL_RSCACKC;
2594 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
2595 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2597 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2601 static void
2602 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
2604 ixgbe_t *ixgbe = tx_ring->ixgbe;
2605 struct ixgbe_hw *hw = &ixgbe->hw;
2606 uint32_t size;
2607 uint32_t buf_low;
2608 uint32_t buf_high;
2609 uint32_t reg_val;
2611 ASSERT(mutex_owned(&tx_ring->tx_lock));
2612 ASSERT(mutex_owned(&ixgbe->gen_lock));
2615 * Initialize the length register
2617 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
2618 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);
2621 * Initialize the base address registers
2623 buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
2624 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
2625 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
2626 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);
2629 * Setup head & tail pointers
2631 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
2632 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);
2635 * Setup head write-back
2637 if (ixgbe->tx_head_wb_enable) {
2639 * The memory of the head write-back is allocated using
2640 * the extra tbd beyond the tail of the tbd ring.
2642 tx_ring->tbd_head_wb = (uint32_t *)
2643 ((uintptr_t)tx_ring->tbd_area.address + size);
2644 *tx_ring->tbd_head_wb = 0;
2646 buf_low = (uint32_t)
2647 (tx_ring->tbd_area.dma_address + size);
2648 buf_high = (uint32_t)
2649 ((tx_ring->tbd_area.dma_address + size) >> 32);
2651 /* Set the head write-back enable bit */
2652 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2654 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2655 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2658 * Turn off relaxed ordering for head write-back, or it will
2659 * cause problems with the tx recycling.
2662 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
2663 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2664 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2665 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2666 if (hw->mac.type == ixgbe_mac_82598EB) {
2667 IXGBE_WRITE_REG(hw,
2668 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2669 } else {
2670 IXGBE_WRITE_REG(hw,
2671 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2673 } else {
2674 tx_ring->tbd_head_wb = NULL;
2677 tx_ring->tbd_head = 0;
2678 tx_ring->tbd_tail = 0;
2679 tx_ring->tbd_free = tx_ring->ring_size;
2681 if (ixgbe->tx_ring_init == B_TRUE) {
2682 tx_ring->tcb_head = 0;
2683 tx_ring->tcb_tail = 0;
2684 tx_ring->tcb_free = tx_ring->free_list_size;
2688 * Initialize the s/w context structure
2690 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2693 static void
2694 ixgbe_setup_tx(ixgbe_t *ixgbe)
2696 struct ixgbe_hw *hw = &ixgbe->hw;
2697 ixgbe_tx_ring_t *tx_ring;
2698 uint32_t reg_val;
2699 uint32_t ring_mapping;
2700 int i;
2702 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2703 tx_ring = &ixgbe->tx_rings[i];
2704 ixgbe_setup_tx_ring(tx_ring);
2708 * Setup the per-ring statistics mapping.
2710 ring_mapping = 0;
2711 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2712 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2713 if ((i & 0x3) == 0x3) {
2714 switch (hw->mac.type) {
2715 case ixgbe_mac_82598EB:
2716 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2717 ring_mapping);
2718 break;
2720 case ixgbe_mac_82599EB:
2721 case ixgbe_mac_X540:
2722 case ixgbe_mac_X550:
2723 case ixgbe_mac_X550EM_x:
2724 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2725 ring_mapping);
2726 break;
2728 default:
2729 break;
2732 ring_mapping = 0;
2735 if (i & 0x3) {
2736 switch (hw->mac.type) {
2737 case ixgbe_mac_82598EB:
2738 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2739 break;
2741 case ixgbe_mac_82599EB:
2742 case ixgbe_mac_X540:
2743 case ixgbe_mac_X550:
2744 case ixgbe_mac_X550EM_x:
2745 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2746 break;
2748 default:
2749 break;
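/*
 * Each 32-bit TQSMR/TQSM register maps four Tx queues to statistics
 * counters, one byte per queue (only the low four bits are used):
 * queue i lands in byte (i & 0x3) of register (i >> 2), which is what
 * the shift arithmetic above computes.
 */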
2754 * Enable CRC appending and TX padding (for short tx frames)
2756 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2757 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2758 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2761 * Enable DMA for 82599, X540 and X550 parts.
2763 if (hw->mac.type == ixgbe_mac_82599EB ||
2764 hw->mac.type == ixgbe_mac_X540 ||
2765 hw->mac.type == ixgbe_mac_X550 ||
2766 hw->mac.type == ixgbe_mac_X550EM_x) {
2767 /* DMATXCTL.TE must be set after all Tx config is complete */
2768 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2769 reg_val |= IXGBE_DMATXCTL_TE;
2770 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2772 /* Disable arbiter to set MTQC */
2773 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2774 reg_val |= IXGBE_RTTDCS_ARBDIS;
2775 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2776 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2777 reg_val &= ~IXGBE_RTTDCS_ARBDIS;
2778 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
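/*
 * Note: MTQC can only be modified while the Tx descriptor arbiter is
 * disabled (RTTDCS.ARBDIS set), hence the disable/write/re-enable
 * sequence above.
 */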
2782 * Enable tx queues.
2783 * For 82599 this must be done after DMATXCTL.TE is set.
2785 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2786 tx_ring = &ixgbe->tx_rings[i];
2787 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2788 reg_val |= IXGBE_TXDCTL_ENABLE;
2789 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2794 * ixgbe_setup_rss - Setup receive-side scaling feature.
2796 static void
2797 ixgbe_setup_rss(ixgbe_t *ixgbe)
2799 struct ixgbe_hw *hw = &ixgbe->hw;
2800 uint32_t mrqc;
2803 * Initialize RETA/ERETA table
2805 ixgbe_setup_rss_table(ixgbe);
2808 * Enable RSS & perform hash on these packet types
2810 mrqc = IXGBE_MRQC_RSSEN |
2811 IXGBE_MRQC_RSS_FIELD_IPV4 |
2812 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2813 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2814 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2815 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2816 IXGBE_MRQC_RSS_FIELD_IPV6 |
2817 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2818 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2819 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2820 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2824 * ixgbe_setup_vmdq - Setup MAC classification feature
2826 static void
2827 ixgbe_setup_vmdq(ixgbe_t *ixgbe)
2829 struct ixgbe_hw *hw = &ixgbe->hw;
2830 uint32_t vmdctl, i, vtctl;
2833 * Setup the VMDq Control register, enable VMDq based on
2834 * packet destination MAC address:
2836 switch (hw->mac.type) {
2837 case ixgbe_mac_82598EB:
2839 * VMDq Enable = 1;
2840 * VMDq Filter = 0; MAC filtering
2841 * Default VMDq output index = 0;
2843 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2844 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2845 break;
2847 case ixgbe_mac_82599EB:
2848 case ixgbe_mac_X540:
2849 case ixgbe_mac_X550:
2850 case ixgbe_mac_X550EM_x:
2852 * Enable VMDq-only.
2854 vmdctl = IXGBE_MRQC_VMDQEN;
2855 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2857 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2858 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2859 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2863 * Enable Virtualization and Replication.
2865 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2866 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2869 * Enable receiving packets to all VFs
2871 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2872 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2873 break;
2875 default:
2876 break;
2881 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature.
2883 static void
2884 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
2886 struct ixgbe_hw *hw = &ixgbe->hw;
2887 uint32_t i, mrqc;
2888 uint32_t vtctl, vmdctl;
2891 * Initialize RETA/ERETA table
2893 ixgbe_setup_rss_table(ixgbe);
2896 * Enable and setup RSS and VMDq
2898 switch (hw->mac.type) {
2899 case ixgbe_mac_82598EB:
2901 * Enable RSS & Setup RSS Hash functions
2903 mrqc = IXGBE_MRQC_RSSEN |
2904 IXGBE_MRQC_RSS_FIELD_IPV4 |
2905 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2906 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2907 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2908 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2909 IXGBE_MRQC_RSS_FIELD_IPV6 |
2910 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2911 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2912 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2913 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2916 * Enable and Setup VMDq
2917 * VMDq Filter = 0; MAC filtering
2918 * Default VMDq output index = 0;
2920 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2921 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2922 break;
2924 case ixgbe_mac_82599EB:
2925 case ixgbe_mac_X540:
2926 case ixgbe_mac_X550:
2927 case ixgbe_mac_X550EM_x:
2929 * Enable RSS & Setup RSS Hash functions
2931 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2932 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2933 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2934 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2935 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2936 IXGBE_MRQC_RSS_FIELD_IPV6 |
2937 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2938 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2939 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2942 * Enable VMDq+RSS.
2944 if (ixgbe->num_rx_groups > 32) {
2945 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2946 } else {
2947 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2950 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2952 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2953 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2954 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2956 break;
2958 default:
2959 break;
2963 if (hw->mac.type == ixgbe_mac_82599EB ||
2964 hw->mac.type == ixgbe_mac_X540 ||
2965 hw->mac.type == ixgbe_mac_X550 ||
2966 hw->mac.type == ixgbe_mac_X550EM_x) {
2968 * Enable Virtualization and Replication.
2970 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2971 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2974 * Enable receiving packets to all VFs
2976 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2977 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2982 * ixgbe_setup_rss_table - Setup RSS table
2984 static void
2985 ixgbe_setup_rss_table(ixgbe_t *ixgbe)
2987 struct ixgbe_hw *hw = &ixgbe->hw;
2988 uint32_t i, j;
2989 uint32_t random;
2990 uint32_t reta;
2991 uint32_t ring_per_group;
2992 uint32_t ring;
2993 uint32_t table_size;
2994 uint32_t index_mult;
2995 uint32_t rxcsum;
2998 * Set multiplier for RETA setup and table size based on MAC type.
2999 * RETA table sizes vary by model:
3001 * 82598, 82599, X540: 128 table entries.
3002 * X550: 512 table entries.
3004 index_mult = 0x1;
3005 table_size = 128;
3006 switch (ixgbe->hw.mac.type) {
3007 case ixgbe_mac_82598EB:
3008 index_mult = 0x11;
3009 break;
3010 case ixgbe_mac_X550:
3011 case ixgbe_mac_X550EM_x:
3012 table_size = 512;
3013 break;
3014 default:
3015 break;
3019 * Fill out the RSS redirection table. The configuration of the indices is
3020 * hardware-dependent.
3022 * 82598: 8 bits wide containing two 4 bit RSS indices
3023 * 82599, X540: 8 bits wide containing one 4 bit RSS index
3024 * X550: 8 bits wide containing one 6 bit RSS index
3026 reta = 0;
3027 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3029 for (i = 0, j = 0; i < table_size; i++, j++) {
3030 if (j == ring_per_group)
3031 j = 0;
3033 * The low 8 bits are for hash value (n+0);
3034 * The next 8 bits are for hash value (n+1), etc.
3036 ring = (j * index_mult);
3037 reta = reta >> 8;
3038 reta = reta | (((uint32_t)ring) << 24);
3040 if ((i & 3) == 3) {
3042 * The first 128 table entries are programmed into the
3043 * RETA register, with any beyond that (e.g., on X550)
3044 * into ERETA.
3046 if (i < 128)
3047 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3048 else
3049 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3050 reta);
3051 reta = 0;
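/*
 * Packing example: on non-82598 MACs index_mult is 1, so with 4 rings
 * per group the byte stream written is 0, 1, 2, 3, 0, 1, ... and
 * RETA(0) ends up as 0x03020100. On 82598 index_mult is 0x11, so each
 * byte carries two identical 4-bit indices.
 */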
3056 * Fill out the hash function seeds with random values.
3058 for (i = 0; i < 10; i++) {
3059 (void) random_get_pseudo_bytes((uint8_t *)&random,
3060 sizeof (uint32_t));
3061 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
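/*
 * The ten 32-bit RSSRK registers together hold the 40-byte key used
 * by the Toeplitz RSS hash.
 */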
3065 * Disable Packet Checksum to enable RSS for multiple receive queues.
3066 * It is an adapter hardware limitation that Packet Checksum is
3067 * mutually exclusive with RSS.
3069 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3070 rxcsum |= IXGBE_RXCSUM_PCSD;
3071 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
3072 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3076 * ixgbe_init_unicst - Initialize the unicast addresses.
3078 static void
3079 ixgbe_init_unicst(ixgbe_t *ixgbe)
3081 struct ixgbe_hw *hw = &ixgbe->hw;
3082 uint8_t *mac_addr;
3083 int slot;
3085 * Here we should consider two situations:
3087 * 1. The chipset is initialized for the first time:
3088 * Clear all the multiple unicast addresses.
3090 * 2. The chipset is reset:
3091 * Recover the multiple unicast addresses from the
3092 * software data structure to the RAR registers.
3094 if (!ixgbe->unicst_init) {
3096 * Initialize the multiple unicast addresses
3098 ixgbe->unicst_total = hw->mac.num_rar_entries;
3099 ixgbe->unicst_avail = ixgbe->unicst_total;
3100 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3101 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
3102 bzero(mac_addr, ETHERADDRL);
3103 (void) ixgbe_set_rar(hw, slot, mac_addr, 0, 0);
3104 ixgbe->unicst_addr[slot].mac.set = 0;
3106 ixgbe->unicst_init = B_TRUE;
3107 } else {
3108 /* Re-configure the RAR registers */
3109 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3110 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
3111 if (ixgbe->unicst_addr[slot].mac.set == 1) {
3112 (void) ixgbe_set_rar(hw, slot, mac_addr,
3113 ixgbe->unicst_addr[slot].mac.group_index,
3114 IXGBE_RAH_AV);
3115 } else {
3116 bzero(mac_addr, ETHERADDRL);
3117 (void) ixgbe_set_rar(hw, slot, mac_addr,
3118 0, 0);
3125 * ixgbe_unicst_find - Find the slot for the specified unicast address
3128 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
3130 int slot;
3132 ASSERT(mutex_owned(&ixgbe->gen_lock));
3134 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3135 if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
3136 mac_addr, ETHERADDRL) == 0)
3137 return (slot);
3140 return (-1);
3144 * ixgbe_multicst_add - Add a multicast address.
3147 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
3149 ASSERT(mutex_owned(&ixgbe->gen_lock));
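/* The low bit of the first octet is the multicast (I/G) bit. */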
3151 if ((multiaddr[0] & 01) == 0) {
3152 return (EINVAL);
3155 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
3156 return (ENOENT);
3159 bcopy(multiaddr,
3160 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
3161 ixgbe->mcast_count++;
3164 * Update the multicast table in the hardware
3166 ixgbe_setup_multicst(ixgbe);
3168 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3169 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3170 return (EIO);
3173 return (0);
3177 * ixgbe_multicst_remove - Remove a multicast address.
3180 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
3182 int i;
3184 ASSERT(mutex_owned(&ixgbe->gen_lock));
3186 for (i = 0; i < ixgbe->mcast_count; i++) {
3187 if (bcmp(multiaddr, &ixgbe->mcast_table[i],
3188 ETHERADDRL) == 0) {
3189 for (i++; i < ixgbe->mcast_count; i++) {
3190 ixgbe->mcast_table[i - 1] =
3191 ixgbe->mcast_table[i];
3193 ixgbe->mcast_count--;
3194 break;
3199 * Update the multicast table in the hardware
3201 ixgbe_setup_multicst(ixgbe);
3203 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3204 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3205 return (EIO);
3208 return (0);
3212 * ixgbe_setup_multicst - Setup multicast data structures.
3214 * This routine initializes all of the multicast related structures
3215 * and saves them in the hardware registers.
3217 static void
3218 ixgbe_setup_multicst(ixgbe_t *ixgbe)
3220 uint8_t *mc_addr_list;
3221 uint32_t mc_addr_count;
3222 struct ixgbe_hw *hw = &ixgbe->hw;
3224 ASSERT(mutex_owned(&ixgbe->gen_lock));
3226 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
3228 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
3229 mc_addr_count = ixgbe->mcast_count;
3232 * Update the multicast addresses to the MTA registers
3234 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
3235 ixgbe_mc_table_itr, TRUE);
3239 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
3241 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
3242 * Different chipsets may allow different configurations of vmdq and rss.
3244 static void
3245 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
3247 struct ixgbe_hw *hw = &ixgbe->hw;
3248 uint32_t ring_per_group;
3250 switch (hw->mac.type) {
3251 case ixgbe_mac_82598EB:
3253 * 82598 supports the following combination:
3254 * vmdq no. x rss no.
3255 * [5..16] x 1
3256 * [1..4] x [1..16]
3257 * However 8 rss queues per pool (vmdq) are sufficient for
3258 * most cases.
3260 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3261 if (ixgbe->num_rx_groups > 4) {
3262 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
3263 } else {
3264 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3265 min(8, ring_per_group);
3268 break;
3270 case ixgbe_mac_82599EB:
3271 case ixgbe_mac_X540:
3272 case ixgbe_mac_X550:
3273 case ixgbe_mac_X550EM_x:
3275 * 82599 supports the following combination:
3276 * vmdq no. x rss no.
3277 * [33..64] x [1..2]
3278 * [2..32] x [1..4]
3279 * 1 x [1..16]
3280 * However 8 rss queues per pool (vmdq) are sufficient for
3281 * most cases.
3283 * For now, treat X540 and X550 like the 82599.
3285 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3286 if (ixgbe->num_rx_groups == 1) {
3287 ixgbe->num_rx_rings = min(8, ring_per_group);
3288 } else if (ixgbe->num_rx_groups <= 32) {
3289 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3290 min(4, ring_per_group);
3291 } else if (ixgbe->num_rx_groups <= 64) {
3292 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3293 min(2, ring_per_group);
3295 break;
3297 default:
3298 break;
3301 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3303 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
3304 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3305 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) {
3306 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ;
3307 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) {
3308 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS;
3309 } else {
3310 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS;
3313 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d",
3314 ixgbe->num_rx_groups, ixgbe->num_rx_rings);
3318 * ixgbe_get_conf - Get driver configurations set in driver.conf.
3320 * This routine gets user-configured values out of the configuration
3321 * file ixgbe.conf.
3323 * For each configurable value, there is a minimum, a maximum, and a
3324 * default.
3325 * If user does not configure a value, use the default.
3326 * If user configures below the minimum, use the minimum.
3327 * If user configures above the maximum, use the maximum.
3329 static void
3330 ixgbe_get_conf(ixgbe_t *ixgbe)
3332 struct ixgbe_hw *hw = &ixgbe->hw;
3333 uint32_t flow_control;
3336 * ixgbe driver supports the following user configurations:
3338 * Jumbo frame configuration:
3339 * default_mtu
3341 * Ethernet flow control configuration:
3342 * flow_control
3344 * Multiple rings configurations:
3345 * tx_queue_number
3346 * tx_ring_size
3347 * rx_queue_number
3348 * rx_ring_size
3350 * Call ixgbe_get_prop() to get the value for a specific
3351 * configuration parameter.
3355 * Jumbo frame configuration - max_frame_size controls host buffer
3356 * allocation, so it includes the MTU, ethernet header, vlan tag and
3357 * frame check sequence.
3359 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
3360 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);
3362 ixgbe->max_frame_size = ixgbe->default_mtu +
3363 sizeof (struct ether_vlan_header) + ETHERFCSL;
3366 * Ethernet flow control configuration
3368 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
3369 ixgbe_fc_none, 3, ixgbe_fc_none);
3370 if (flow_control == 3)
3371 flow_control = ixgbe_fc_default;
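/*
 * Note (assuming the shared-code ixgbe_fc_* enum ordering: none,
 * rx_pause, tx_pause, full): values 0-2 select a mode directly, while
 * a configured 3 is remapped to ixgbe_fc_default and resolved by the
 * shared code.
 */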
3374 * fc.requested_mode is what the user requests. After autoneg,
3375 * fc.current_mode will be the flow_control mode that was negotiated.
3377 hw->fc.requested_mode = flow_control;
3380 * Multiple rings configurations
3382 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
3383 ixgbe->capab->min_tx_que_num,
3384 ixgbe->capab->max_tx_que_num,
3385 ixgbe->capab->def_tx_que_num);
3386 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
3387 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
3389 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
3390 ixgbe->capab->min_rx_que_num,
3391 ixgbe->capab->max_rx_que_num,
3392 ixgbe->capab->def_rx_que_num);
3393 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
3394 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
3397 * Multiple groups configuration
3399 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
3400 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num,
3401 ixgbe->capab->def_rx_grp_num);
3403 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
3404 0, 1, DEFAULT_MR_ENABLE);
3406 if (ixgbe->mr_enable == B_FALSE) {
3407 ixgbe->num_tx_rings = 1;
3408 ixgbe->num_rx_rings = 1;
3409 ixgbe->num_rx_groups = 1;
3410 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3411 } else {
3412 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3413 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1);
3415 * The combination of num_rx_rings and num_rx_groups
3416 * may not be supported by h/w. We need to adjust
3417 * them to appropriate values.
3419 ixgbe_setup_vmdq_rss_conf(ixgbe);
3423 * Tunable used to force an interrupt type. The only use is
3424 * for testing the lesser interrupt types.
3425 * 0 = don't force interrupt type
3426 * 1 = force interrupt type MSI-X
3427 * 2 = force interrupt type MSI
3428 * 3 = force interrupt type Legacy
3430 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3431 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3433 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3434 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3435 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3436 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3437 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3438 0, 1, DEFAULT_LSO_ENABLE);
3439 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3440 0, 1, DEFAULT_LRO_ENABLE);
3441 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3442 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3443 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3444 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3446 /* Head Write Back not recommended for 82599, X540 and X550 */
3447 if (hw->mac.type == ixgbe_mac_82599EB ||
3448 hw->mac.type == ixgbe_mac_X540 ||
3449 hw->mac.type == ixgbe_mac_X550 ||
3450 hw->mac.type == ixgbe_mac_X550EM_x) {
3451 ixgbe->tx_head_wb_enable = B_FALSE;
3455 * ixgbe LSO needs the tx h/w checksum support.
3456 * LSO will be disabled if tx h/w checksum is not
3457 * enabled.
3459 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3460 ixgbe->lso_enable = B_FALSE;
3464 * ixgbe LRO needs the rx h/w checksum support.
3465 * LRO will be disabled if rx h/w checksum is not
3466 * enabled.
3468 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3469 ixgbe->lro_enable = B_FALSE;
3473 * ixgbe LRO only supported by 82599, X540 and X550
3475 if (hw->mac.type == ixgbe_mac_82598EB) {
3476 ixgbe->lro_enable = B_FALSE;
3478 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3479 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3480 DEFAULT_TX_COPY_THRESHOLD);
3481 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3482 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3483 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3484 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3485 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3486 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3487 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3488 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3489 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3491 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3492 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3493 DEFAULT_RX_COPY_THRESHOLD);
3494 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3495 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3496 DEFAULT_RX_LIMIT_PER_INTR);
3498 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3499 ixgbe->capab->min_intr_throttle,
3500 ixgbe->capab->max_intr_throttle,
3501 ixgbe->capab->def_intr_throttle);
3503 * 82599, X540 and X550 require that the interrupt throttling rate be
3504 * a multiple of 8. This is enforced by the register definition.
3506 if (hw->mac.type == ixgbe_mac_82599EB ||
3507 hw->mac.type == ixgbe_mac_X540 ||
3508 hw->mac.type == ixgbe_mac_X550 ||
3509 hw->mac.type == ixgbe_mac_X550EM_x)
3510 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
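/*
 * Masking with 0xFF8 clears bits 2:0, rounding the value down to a
 * multiple of 8 to match the EITR interval field, which occupies
 * bits 11:3 on these MACs.
 */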
3512 hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe,
3513 PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP);
3516 static void
3517 ixgbe_init_params(ixgbe_t *ixgbe)
3519 struct ixgbe_hw *hw = &ixgbe->hw;
3520 ixgbe_link_speed speeds_supported = 0;
3521 bool negotiate;
3524 * Get a list of speeds the adapter supports. If the hw struct hasn't
3525 * been populated with this information yet, retrieve it from the
3526 * adapter and save it to our own variable.
3528 * On certain adapters, such as ones which use SFPs, the contents of
3529 * hw->phy.speeds_supported (and hw->phy.autoneg_advertised) are not
3530 * updated, so we must rely on calling ixgbe_get_link_capabilities()
3531 * in order to ascertain the speeds which we are capable of supporting,
3532 * and in the case of SFP-equipped adapters, which speed we are
3533 * advertising. If ixgbe_get_link_capabilities() fails for some reason,
3534 * we'll go with a default list of speeds as a last resort.
3536 speeds_supported = hw->phy.speeds_supported;
3538 if (speeds_supported == 0) {
3539 if (ixgbe_get_link_capabilities(hw, &speeds_supported,
3540 &negotiate) != IXGBE_SUCCESS) {
3541 if (hw->mac.type == ixgbe_mac_82598EB) {
3542 speeds_supported =
3543 IXGBE_LINK_SPEED_82598_AUTONEG;
3544 } else {
3545 speeds_supported =
3546 IXGBE_LINK_SPEED_82599_AUTONEG;
3550 ixgbe->speeds_supported = speeds_supported;
3553 * By default, all supported speeds are enabled and advertised.
3555 if (speeds_supported & IXGBE_LINK_SPEED_10GB_FULL) {
3556 ixgbe->param_en_10000fdx_cap = 1;
3557 ixgbe->param_adv_10000fdx_cap = 1;
3558 } else {
3559 ixgbe->param_en_10000fdx_cap = 0;
3560 ixgbe->param_adv_10000fdx_cap = 0;
3563 if (speeds_supported & IXGBE_LINK_SPEED_5GB_FULL) {
3564 ixgbe->param_en_5000fdx_cap = 1;
3565 ixgbe->param_adv_5000fdx_cap = 1;
3566 } else {
3567 ixgbe->param_en_5000fdx_cap = 0;
3568 ixgbe->param_adv_5000fdx_cap = 0;
3571 if (speeds_supported & IXGBE_LINK_SPEED_2_5GB_FULL) {
3572 ixgbe->param_en_2500fdx_cap = 1;
3573 ixgbe->param_adv_2500fdx_cap = 1;
3574 } else {
3575 ixgbe->param_en_2500fdx_cap = 0;
3576 ixgbe->param_adv_2500fdx_cap = 0;
3579 if (speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) {
3580 ixgbe->param_en_1000fdx_cap = 1;
3581 ixgbe->param_adv_1000fdx_cap = 1;
3582 } else {
3583 ixgbe->param_en_1000fdx_cap = 0;
3584 ixgbe->param_adv_1000fdx_cap = 0;
3587 if (speeds_supported & IXGBE_LINK_SPEED_100_FULL) {
3588 ixgbe->param_en_100fdx_cap = 1;
3589 ixgbe->param_adv_100fdx_cap = 1;
3590 } else {
3591 ixgbe->param_en_100fdx_cap = 0;
3592 ixgbe->param_adv_100fdx_cap = 0;
3595 ixgbe->param_pause_cap = 1;
3596 ixgbe->param_asym_pause_cap = 1;
3597 ixgbe->param_rem_fault = 0;
3599 ixgbe->param_adv_autoneg_cap = 1;
3600 ixgbe->param_adv_pause_cap = 1;
3601 ixgbe->param_adv_asym_pause_cap = 1;
3602 ixgbe->param_adv_rem_fault = 0;
3604 ixgbe->param_lp_10000fdx_cap = 0;
3605 ixgbe->param_lp_5000fdx_cap = 0;
3606 ixgbe->param_lp_2500fdx_cap = 0;
3607 ixgbe->param_lp_1000fdx_cap = 0;
3608 ixgbe->param_lp_100fdx_cap = 0;
3609 ixgbe->param_lp_autoneg_cap = 0;
3610 ixgbe->param_lp_pause_cap = 0;
3611 ixgbe->param_lp_asym_pause_cap = 0;
3612 ixgbe->param_lp_rem_fault = 0;
3616 * ixgbe_get_prop - Get a property value out of the configuration file
3617 * ixgbe.conf.
3619 * Caller provides the name of the property, a default value, a minimum
3620 * value, and a maximum value.
3622 * Return configured value of the property, with default, minimum and
3623 * maximum properly applied.
3625 static int
3626 ixgbe_get_prop(ixgbe_t *ixgbe,
3627 char *propname, /* name of the property */
3628 int minval, /* minimum acceptable value */
3629 int maxval, /* maximum acceptable value */
3630 int defval) /* default value */
3632 int value;
3635 * Call ddi_prop_get_int() to read the conf settings
3637 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
3638 DDI_PROP_DONTPASS, propname, defval);
3639 if (value > maxval)
3640 value = maxval;
3642 if (value < minval)
3643 value = minval;
3645 return (value);
3649 * ixgbe_driver_setup_link - Using the link properties to setup the link.
3652 ixgbe_driver_setup_link(ixgbe_t *ixgbe, bool setup_hw)
3654 struct ixgbe_hw *hw = &ixgbe->hw;
3655 ixgbe_link_speed advertised = 0;
3658 * Assemble a list of enabled speeds to auto-negotiate with.
3660 if (ixgbe->param_en_10000fdx_cap == 1)
3661 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3663 if (ixgbe->param_en_5000fdx_cap == 1)
3664 advertised |= IXGBE_LINK_SPEED_5GB_FULL;
3666 if (ixgbe->param_en_2500fdx_cap == 1)
3667 advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
3669 if (ixgbe->param_en_1000fdx_cap == 1)
3670 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3672 if (ixgbe->param_en_100fdx_cap == 1)
3673 advertised |= IXGBE_LINK_SPEED_100_FULL;
3676 * As a last resort, autoneg with a default list of speeds.
3678 if (ixgbe->param_adv_autoneg_cap == 1 && advertised == 0) {
3679 ixgbe_notice(ixgbe, "Invalid link settings. Setting link "
3680 "to autonegotiate with full capabilities.");
3682 if (hw->mac.type == ixgbe_mac_82598EB)
3683 advertised = IXGBE_LINK_SPEED_82598_AUTONEG;
3684 else
3685 advertised = IXGBE_LINK_SPEED_82599_AUTONEG;
3688 if (setup_hw) {
3689 if (ixgbe_setup_link(&ixgbe->hw, advertised,
3690 ixgbe->param_adv_autoneg_cap) != IXGBE_SUCCESS) {
3691 ixgbe_notice(ixgbe, "Setup link failed on this "
3692 "device.");
3693 return (IXGBE_FAILURE);
3697 return (IXGBE_SUCCESS);
3701 * ixgbe_driver_link_check - Link status processing.
3703 * This function can be called in both kernel context and interrupt context.
3705 static void
3706 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3708 struct ixgbe_hw *hw = &ixgbe->hw;
3709 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3710 bool link_up = B_FALSE;
3711 bool link_changed = B_FALSE;
3713 ASSERT(mutex_owned(&ixgbe->gen_lock));
3715 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);
3716 if (link_up) {
3717 ixgbe->link_check_complete = B_TRUE;
3719 /* Link is up, enable flow control settings */
3720 (void) ixgbe_fc_enable(hw);
3723 * The Link is up, check whether it was marked as down earlier
3725 if (ixgbe->link_state != LINK_STATE_UP) {
3726 switch (speed) {
3727 case IXGBE_LINK_SPEED_10GB_FULL:
3728 ixgbe->link_speed = SPEED_10GB;
3729 break;
3730 case IXGBE_LINK_SPEED_5GB_FULL:
3731 ixgbe->link_speed = SPEED_5GB;
3732 break;
3733 case IXGBE_LINK_SPEED_2_5GB_FULL:
3734 ixgbe->link_speed = SPEED_2_5GB;
3735 break;
3736 case IXGBE_LINK_SPEED_1GB_FULL:
3737 ixgbe->link_speed = SPEED_1GB;
3738 break;
3739 case IXGBE_LINK_SPEED_100_FULL:
3740 ixgbe->link_speed = SPEED_100;
3742 ixgbe->link_duplex = LINK_DUPLEX_FULL;
3743 ixgbe->link_state = LINK_STATE_UP;
3744 link_changed = B_TRUE;
3746 } else {
3747 if (ixgbe->link_check_complete == B_TRUE ||
3748 (ixgbe->link_check_complete == B_FALSE &&
3749 gethrtime() >= ixgbe->link_check_hrtime)) {
3751 * The link is really down
3753 ixgbe->link_check_complete = B_TRUE;
3755 if (ixgbe->link_state != LINK_STATE_DOWN) {
3756 ixgbe->link_speed = 0;
3757 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN;
3758 ixgbe->link_state = LINK_STATE_DOWN;
3759 link_changed = B_TRUE;
3765 * If we are in an interrupt context, we need to re-enable the
3766 * interrupt, which was automasked.
3768 if (servicing_interrupt() != 0) {
3769 ixgbe->eims |= IXGBE_EICR_LSC;
3770 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3773 if (link_changed) {
3774 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
3779 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
3781 static void
3782 ixgbe_sfp_check(void *arg)
3784 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3785 uint32_t eicr = ixgbe->eicr;
3786 struct ixgbe_hw *hw = &ixgbe->hw;
3788 mutex_enter(&ixgbe->gen_lock);
3789 (void) hw->phy.ops.identify_sfp(hw);
3790 if (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
3791 /* clear the interrupt */
3792 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3794 /* if link up, do multispeed fiber setup */
3795 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3796 B_TRUE);
3797 ixgbe_driver_link_check(ixgbe);
3798 ixgbe_get_hw_state(ixgbe);
3799 } else if (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)) {
3800 /* clear the interrupt */
3801 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw));
3803 /* if link up, do sfp module setup */
3804 (void) hw->mac.ops.setup_sfp(hw);
3806 /* do multispeed fiber setup */
3807 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3808 B_TRUE);
3809 ixgbe_driver_link_check(ixgbe);
3810 ixgbe_get_hw_state(ixgbe);
3812 mutex_exit(&ixgbe->gen_lock);
3815 * We need to fully re-check the link later.
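* Note: gethrtime() returns nanoseconds, so the deadline below gives
* the link a window of IXGBE_LINK_UP_TIME * 100 ms to come back up
* before it is reported down.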
3817 ixgbe->link_check_complete = B_FALSE;
3818 ixgbe->link_check_hrtime = gethrtime() +
3819 (IXGBE_LINK_UP_TIME * 100000000ULL);
3823 * ixgbe_overtemp_check - overtemp module processing done in taskq
3825 * This routine will only be called on adapters with a temperature sensor.
3826 * The indication of over-temperature can be either an SDP0 interrupt or the link
3827 * status change interrupt.
3829 static void
3830 ixgbe_overtemp_check(void *arg)
3832 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3833 struct ixgbe_hw *hw = &ixgbe->hw;
3834 uint32_t eicr = ixgbe->eicr;
3835 ixgbe_link_speed speed;
3836 bool link_up;
3838 mutex_enter(&ixgbe->gen_lock);
3840 /* make sure we know current state of link */
3841 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);
3843 /* check over-temp condition */
3844 if (((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) && (!link_up)) ||
3845 (eicr & IXGBE_EICR_LSC)) {
3846 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
3847 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3850 * Disable the adapter interrupts
3852 ixgbe_disable_adapter_interrupts(ixgbe);
3855 * Disable Rx/Tx units
3857 (void) ixgbe_stop_adapter(hw);
3859 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3860 ixgbe_error(ixgbe,
3861 "Problem: Network adapter has been stopped "
3862 "because it has overheated");
3863 ixgbe_error(ixgbe,
3864 "Action: Restart the computer. "
3865 "If the problem persists, power off the system "
3866 "and replace the adapter");
3870 /* write to clear the interrupt */
3871 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3873 mutex_exit(&ixgbe->gen_lock);
3877 * ixgbe_phy_check - taskq to process interrupts from an external PHY
3879 * This routine will only be called on adapters with external PHYs
3880 * (such as X550) that may be trying to raise our attention to some event.
3881 * Currently, this is limited to claiming PHY overtemperature and link status
3882 * change (LSC) events; however, this may expand to include other things in
3883 * future adapters.
3885 static void
3886 ixgbe_phy_check(void *arg)
3888 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3889 struct ixgbe_hw *hw = &ixgbe->hw;
3890 int rv;
3892 mutex_enter(&ixgbe->gen_lock);
3895 * X550 baseT PHY overtemp and LSC events are handled here.
3897 * If an overtemp event occurs, it will be reflected in the
3898 * return value of phy.ops.handle_lasi() and the common code will
3899 * automatically power off the baseT PHY. This is our cue to trigger
3900 * an FMA event.
3902 * If a link status change event occurs, phy.ops.handle_lasi() will
3903 * automatically initiate a link setup between the integrated KR PHY
3904 * and the external X557 PHY to ensure that the link speed between
3905 * them matches the link speed of the baseT link.
3907 rv = ixgbe_handle_lasi(hw);
3909 if (rv == IXGBE_ERR_OVERTEMP) {
3910 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3913 * Disable the adapter interrupts
3915 ixgbe_disable_adapter_interrupts(ixgbe);
3918 * Disable Rx/Tx units
3920 (void) ixgbe_stop_adapter(hw);
3922 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3923 ixgbe_error(ixgbe,
3924 "Problem: Network adapter has been stopped due to a "
3925 "overtemperature event being detected.");
3926 ixgbe_error(ixgbe,
3927 "Action: Shut down or restart the computer. If the issue "
3928 "persists, please take action in accordance with the "
3929 "recommendations from your system vendor.");
3932 mutex_exit(&ixgbe->gen_lock);
3936 * ixgbe_link_timer - timer for link status detection
3938 static void
3939 ixgbe_link_timer(void *arg)
3941 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3943 mutex_enter(&ixgbe->gen_lock);
3944 ixgbe_driver_link_check(ixgbe);
3945 mutex_exit(&ixgbe->gen_lock);
3949 * ixgbe_local_timer - Driver watchdog function.
3951 * This function will handle the transmit stall check and other routines.
3953 static void
3954 ixgbe_local_timer(void *arg)
3956 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3958 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP)
3959 goto out;
3961 if (ixgbe->ixgbe_state & IXGBE_ERROR) {
3962 ixgbe->reset_count++;
3963 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3964 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3965 goto out;
3968 if (ixgbe_stall_check(ixgbe)) {
3969 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
3970 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3972 ixgbe->reset_count++;
3973 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3974 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3977 out:
3978 ixgbe_restart_watchdog_timer(ixgbe);
3982 * ixgbe_stall_check - Check for transmit stall.
3984 * This function checks if the adapter is stalled (in transmit).
3986 * It is called each time the watchdog timeout is invoked.
3987 * If the transmit descriptor reclaim continuously fails,
3988 * the watchdog value will increment by 1. If the watchdog
3989 * value exceeds the threshold, the ixgbe is assumed to
3990 * have stalled and needs to be reset.
3992 static bool
3993 ixgbe_stall_check(ixgbe_t *ixgbe)
3995 ixgbe_tx_ring_t *tx_ring;
3996 bool result;
3997 int i;
3999 if (ixgbe->link_state != LINK_STATE_UP)
4000 return (B_FALSE);
4003 * If any tx ring is stalled, we'll reset the chipset
4005 result = B_FALSE;
4006 for (i = 0; i < ixgbe->num_tx_rings; i++) {
4007 tx_ring = &ixgbe->tx_rings[i];
4008 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
4009 tx_ring->tx_recycle(tx_ring);
4012 if (tx_ring->recycle_fail > 0)
4013 tx_ring->stall_watchdog++;
4014 else
4015 tx_ring->stall_watchdog = 0;
4017 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
4018 result = B_TRUE;
4019 break;
4023 if (result) {
4024 tx_ring->stall_watchdog = 0;
4025 tx_ring->recycle_fail = 0;
4028 return (result);
4033 * is_valid_mac_addr - Check if the mac address is valid.
4035 static bool
4036 is_valid_mac_addr(uint8_t *mac_addr)
4038 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
4039 const uint8_t addr_test2[6] =
4040 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4042 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
4043 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
4044 return (B_FALSE);
4046 return (B_TRUE);
4049 static bool
4050 ixgbe_find_mac_address(ixgbe_t *ixgbe)
4052 _NOTE(ARGUNUSED(ixgbe));
4054 return (B_TRUE);
4057 #pragma inline(ixgbe_arm_watchdog_timer)
4058 static void
4059 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
4062 * Fire a watchdog timer
4064 ixgbe->watchdog_tid =
4065 timeout(ixgbe_local_timer,
4066 (void *)ixgbe, 1 * drv_usectohz(1000000));
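/*
 * drv_usectohz(1000000) is one second's worth of clock ticks, so the
 * watchdog fires roughly once per second.
 */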
4071 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
4073 void
4074 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
4076 mutex_enter(&ixgbe->watchdog_lock);
4078 if (!ixgbe->watchdog_enable) {
4079 ixgbe->watchdog_enable = B_TRUE;
4080 ixgbe->watchdog_start = B_TRUE;
4081 ixgbe_arm_watchdog_timer(ixgbe);
4084 mutex_exit(&ixgbe->watchdog_lock);
4088 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
4090 void
4091 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
4093 timeout_id_t tid;
4095 mutex_enter(&ixgbe->watchdog_lock);
4097 ixgbe->watchdog_enable = B_FALSE;
4098 ixgbe->watchdog_start = B_FALSE;
4099 tid = ixgbe->watchdog_tid;
4100 ixgbe->watchdog_tid = 0;
4102 mutex_exit(&ixgbe->watchdog_lock);
4104 if (tid != 0)
4105 (void) untimeout(tid);
4109 * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
4111 void
4112 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
4114 mutex_enter(&ixgbe->watchdog_lock);
4116 if (ixgbe->watchdog_enable) {
4117 if (!ixgbe->watchdog_start) {
4118 ixgbe->watchdog_start = B_TRUE;
4119 ixgbe_arm_watchdog_timer(ixgbe);
4123 mutex_exit(&ixgbe->watchdog_lock);
4127 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
4129 static void
4130 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
4132 mutex_enter(&ixgbe->watchdog_lock);
4134 if (ixgbe->watchdog_start)
4135 ixgbe_arm_watchdog_timer(ixgbe);
4137 mutex_exit(&ixgbe->watchdog_lock);
4141 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
4143 void
4144 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
4146 timeout_id_t tid;
4148 mutex_enter(&ixgbe->watchdog_lock);
4150 ixgbe->watchdog_start = B_FALSE;
4151 tid = ixgbe->watchdog_tid;
4152 ixgbe->watchdog_tid = 0;
4154 mutex_exit(&ixgbe->watchdog_lock);
4156 if (tid != 0)
4157 (void) untimeout(tid);
4161 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
4163 static void
4164 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
4166 struct ixgbe_hw *hw = &ixgbe->hw;
4169 * mask all interrupts off
4171 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
4174 * for MSI-X, also disable autoclear
4176 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
4177 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
4180 IXGBE_WRITE_FLUSH(hw);
4184 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
4186 static void
4187 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
4189 struct ixgbe_hw *hw = &ixgbe->hw;
4190 uint32_t eiac, eiam;
4191 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4193 /* interrupt types to enable */
4194 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */
4195 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */
4196 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */
4198 /* enable automask on "other" causes that this adapter can generate */
4199 eiam = ixgbe->capab->other_intr;
4202 * msi-x mode
4204 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
4205 /* enable autoclear but not on bits 29:20 */
4206 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
4208 /* general purpose interrupt enable */
4209 gpie |= (IXGBE_GPIE_MSIX_MODE
4210 | IXGBE_GPIE_PBA_SUPPORT
4211 | IXGBE_GPIE_OCD
4212 | IXGBE_GPIE_EIAME);
4214 * non-msi-x mode
4216 } else {
4218 /* disable autoclear, leave gpie at default */
4219 eiac = 0;
4222 * General purpose interrupt enable.
4223 * For 82599, X540 and X550, extended interrupt
4224 * automask is enabled only in MSI or MSI-X mode.
4226 if ((hw->mac.type == ixgbe_mac_82598EB) ||
4227 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
4228 gpie |= IXGBE_GPIE_EIAME;
4232 /* Enable specific "other" interrupt types */
4233 switch (hw->mac.type) {
4234 case ixgbe_mac_82598EB:
4235 gpie |= ixgbe->capab->other_gpie;
4236 break;
4238 case ixgbe_mac_82599EB:
4239 case ixgbe_mac_X540:
4240 case ixgbe_mac_X550:
4241 case ixgbe_mac_X550EM_x:
4242 gpie |= ixgbe->capab->other_gpie;
4244 /* Enable RSC Delay 8us when LRO enabled */
4245 if (ixgbe->lro_enable) {
4246 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
4248 break;
4250 default:
4251 break;
4254 /* write to interrupt control registers */
4255 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4256 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
4257 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
4258 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4259 IXGBE_WRITE_FLUSH(hw);
4263 * ixgbe_loopback_ioctl - Loopback support.
4265 enum ioc_reply
4266 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
4268 lb_info_sz_t *lbsp;
4269 lb_property_t *lbpp;
4270 uint32_t *lbmp;
4271 uint32_t size;
4272 uint32_t value;
4274 if (mp->b_cont == NULL)
4275 return (IOC_INVAL);
4277 switch (iocp->ioc_cmd) {
4278 default:
4279 return (IOC_INVAL);
4281 case LB_GET_INFO_SIZE:
4282 size = sizeof (lb_info_sz_t);
4283 if (iocp->ioc_count != size)
4284 return (IOC_INVAL);
4286 value = sizeof (lb_normal);
4287 value += sizeof (lb_mac);
4288 value += sizeof (lb_external);
4290 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
4291 *lbsp = value;
4292 break;
4294 case LB_GET_INFO:
4295 value = sizeof (lb_normal);
4296 value += sizeof (lb_mac);
4297 value += sizeof (lb_external);
4299 size = value;
4300 if (iocp->ioc_count != size)
4301 return (IOC_INVAL);
4303 value = 0;
4304 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
4306 lbpp[value++] = lb_normal;
4307 lbpp[value++] = lb_mac;
4308 lbpp[value++] = lb_external;
4309 break;
4311 case LB_GET_MODE:
4312 size = sizeof (uint32_t);
4313 if (iocp->ioc_count != size)
4314 return (IOC_INVAL);
4316 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
4317 *lbmp = ixgbe->loopback_mode;
4318 break;
4320 case LB_SET_MODE:
4321 size = 0;
4322 if (iocp->ioc_count != sizeof (uint32_t))
4323 return (IOC_INVAL);
4325 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
4326 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
4327 return (IOC_INVAL);
4328 break;
4331 iocp->ioc_count = size;
4332 iocp->ioc_error = 0;
4334 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4335 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4336 return (IOC_INVAL);
4337 }
4339 return (IOC_REPLY);
4340 }
4342 /*
4343 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
4344 */
4345 static bool
4346 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
4347 {
4348 if (mode == ixgbe->loopback_mode)
4349 return (B_TRUE);
4351 ixgbe->loopback_mode = mode;
4353 if (mode == IXGBE_LB_NONE) {
4354 /*
4355 * Reset the chip
4356 */
4357 (void) ixgbe_reset(ixgbe);
4358 return (B_TRUE);
4359 }
4361 mutex_enter(&ixgbe->gen_lock);
4363 switch (mode) {
4364 default:
4365 mutex_exit(&ixgbe->gen_lock);
4366 return (B_FALSE);
4368 case IXGBE_LB_EXTERNAL:
4369 break;
4371 case IXGBE_LB_INTERNAL_MAC:
4372 ixgbe_set_internal_mac_loopback(ixgbe);
4373 break;
4374 }
4376 mutex_exit(&ixgbe->gen_lock);
4378 return (B_TRUE);
4379 }
4381 /*
4382 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
4383 */
4384 static void
4385 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
4386 {
4387 struct ixgbe_hw *hw;
4388 uint32_t reg;
4389 uint8_t atlas;
4391 hw = &ixgbe->hw;
4393 /*
4394 * Setup MAC loopback
4395 */
4396 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
4397 reg |= IXGBE_HLREG0_LPBK;
4398 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
4400 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
4401 reg &= ~IXGBE_AUTOC_LMS_MASK;
4402 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
4404 /*
4405 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
4406 */
4407 switch (hw->mac.type) {
4408 case ixgbe_mac_82598EB:
4409 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
4410 &atlas);
4411 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
4412 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
4413 atlas);
4415 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
4416 &atlas);
4417 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
4418 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
4419 atlas);
4421 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4422 &atlas);
4423 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
4424 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4425 atlas);
4427 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4428 &atlas);
4429 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
4430 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4431 atlas);
4432 break;
4434 case ixgbe_mac_82599EB:
4435 case ixgbe_mac_X540:
4436 case ixgbe_mac_X550:
4437 case ixgbe_mac_X550EM_x:
4438 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
4439 reg |= (IXGBE_AUTOC_FLU |
4440 IXGBE_AUTOC_10G_KX4);
4441 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
4443 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
4444 B_FALSE);
4445 break;
4447 default:
4448 break;
4449 }
4450 }
4452 #pragma inline(ixgbe_intr_rx_work)
4453 /*
4454 * ixgbe_intr_rx_work - RX processing of ISR.
4455 */
4456 static void
4457 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
4458 {
4459 mblk_t *mp;
4461 mutex_enter(&rx_ring->rx_lock);
4463 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4464 mutex_exit(&rx_ring->rx_lock);
4466 if (mp != NULL)
4467 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
4468 rx_ring->ring_gen_num);
4469 }
4471 #pragma inline(ixgbe_intr_tx_work)
4472 /*
4473 * ixgbe_intr_tx_work - TX processing of ISR.
4474 */
4475 static void
4476 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
4477 {
4478 ixgbe_t *ixgbe = tx_ring->ixgbe;
4480 /*
4481 * Recycle the tx descriptors
4482 */
4483 tx_ring->tx_recycle(tx_ring);
4485 /*
4486 * Schedule the re-transmit
4487 */
4488 if (tx_ring->reschedule &&
4489 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
4490 tx_ring->reschedule = B_FALSE;
4491 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
4492 tx_ring->ring_handle);
4493 tx_ring->stat_reschedule++;
4494 }
4495 }
4497 #pragma inline(ixgbe_intr_other_work)
4498 /*
4499 * ixgbe_intr_other_work - Process interrupt types other than tx/rx
4500 */
4501 static void
4502 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
4503 {
4504 struct ixgbe_hw *hw = &ixgbe->hw;
4506 ASSERT(mutex_owned(&ixgbe->gen_lock));
4508 /*
4509 * handle link status change
4510 */
4511 if (eicr & IXGBE_EICR_LSC) {
4512 ixgbe_driver_link_check(ixgbe);
4513 ixgbe_get_hw_state(ixgbe);
4514 }
4516 /*
4517 * check for fan failure on adapters with fans
4518 */
4519 if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
4520 (eicr & IXGBE_EICR_GPI_SDP1)) {
4521 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
4523 /*
4524 * Disable the adapter interrupts
4525 */
4526 ixgbe_disable_adapter_interrupts(ixgbe);
4528 /*
4529 * Disable Rx/Tx units
4530 */
4531 (void) ixgbe_stop_adapter(&ixgbe->hw);
4533 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
4534 ixgbe_error(ixgbe,
4535 "Problem: Network adapter has been stopped "
4536 "because the fan has stopped.\n");
4537 ixgbe_error(ixgbe,
4538 "Action: Replace the adapter.\n");
4540 /* re-enable the interrupt, which was automasked */
4541 ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
4542 }
4544 /*
4545 * Do SFP check for adapters with hot-plug capability
4546 */
4547 if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) &&
4548 ((eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) ||
4549 (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)))) {
4550 ixgbe->eicr = eicr;
4551 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
4552 ixgbe_sfp_check, (void *)ixgbe,
4553 DDI_NOSLEEP)) != DDI_SUCCESS) {
4554 ixgbe_log(ixgbe, "No memory available to dispatch "
4555 "taskq for SFP check");
4556 }
4557 }
4559 /*
4560 * Do over-temperature check for adapters with temp sensor
4561 */
4562 if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) &&
4563 ((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) ||
4564 (eicr & IXGBE_EICR_LSC))) {
4565 ixgbe->eicr = eicr;
4566 if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq,
4567 ixgbe_overtemp_check, (void *)ixgbe,
4568 DDI_NOSLEEP)) != DDI_SUCCESS) {
4569 ixgbe_log(ixgbe, "No memory available to dispatch "
4570 "taskq for overtemp check");
4571 }
4572 }
4574 /*
4575 * Process an external PHY interrupt
4576 */
4577 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
4578 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
4579 ixgbe->eicr = eicr;
4580 if ((ddi_taskq_dispatch(ixgbe->phy_taskq,
4581 ixgbe_phy_check, (void *)ixgbe,
4582 DDI_NOSLEEP)) != DDI_SUCCESS) {
4583 ixgbe_log(ixgbe, "No memory available to dispatch "
4584 "taskq for PHY check");
4585 }
4586 }
4587 }
4589 /*
4590 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
4591 */
4592 static uint_t
4593 ixgbe_intr_legacy(void *arg1, void *arg2)
4594 {
4595 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4596 struct ixgbe_hw *hw = &ixgbe->hw;
4597 ixgbe_tx_ring_t *tx_ring;
4598 ixgbe_rx_ring_t *rx_ring;
4599 uint32_t eicr;
4600 mblk_t *mp;
4601 bool tx_reschedule;
4602 uint_t result;
4604 _NOTE(ARGUNUSED(arg2));
4606 mutex_enter(&ixgbe->gen_lock);
4607 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4608 mutex_exit(&ixgbe->gen_lock);
4609 return (DDI_INTR_UNCLAIMED);
4610 }
4612 mp = NULL;
4613 tx_reschedule = B_FALSE;
4615 /*
4616 * Any bit set in eicr: claim this interrupt
4617 */
4618 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4620 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4621 mutex_exit(&ixgbe->gen_lock);
4622 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4623 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4624 return (DDI_INTR_CLAIMED);
4625 }
4627 if (eicr) {
4628 /*
4629 * For legacy interrupt, we have only one interrupt,
4630 * so we have only one rx ring and one tx ring enabled.
4631 */
4632 ASSERT(ixgbe->num_rx_rings == 1);
4633 ASSERT(ixgbe->num_tx_rings == 1);
4635 /*
4636 * For legacy interrupt, rx rings[0] will use RTxQ[0].
4637 */
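/*
 * Illustrative note (an inference from the RTxQ usage here, not from
 * the original comments): with a single vector the low bits of EICR
 * carry the RTxQ causes, so bit 0 (0x1) below is the rx ring's cause
 * and bit 1 (0x2) is the tx ring's cause, matching the IVAR entries
 * programmed in ixgbe_setup_adapter_vector().
 */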
4638 if (eicr & 0x1) {
4639 ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
4640 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4641 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4642 /*
4643 * Clean the rx descriptors
4644 */
4645 rx_ring = &ixgbe->rx_rings[0];
4646 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4647 }
4649 /*
4650 * For legacy interrupt, tx rings[0] will use RTxQ[1].
4651 */
4652 if (eicr & 0x2) {
4653 /*
4654 * Recycle the tx descriptors
4655 */
4656 tx_ring = &ixgbe->tx_rings[0];
4657 tx_ring->tx_recycle(tx_ring);
4659 /*
4660 * Schedule the re-transmit
4661 */
4662 tx_reschedule = (tx_ring->reschedule &&
4663 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4664 }
4666 /* any interrupt type other than tx/rx */
4667 if (eicr & ixgbe->capab->other_intr) {
4668 switch (hw->mac.type) {
4669 case ixgbe_mac_82598EB:
4670 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4671 break;
4673 case ixgbe_mac_82599EB:
4674 case ixgbe_mac_X540:
4675 case ixgbe_mac_X550:
4676 case ixgbe_mac_X550EM_x:
4677 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4678 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4679 break;
4681 default:
4682 break;
4683 }
4684 ixgbe_intr_other_work(ixgbe, eicr);
4685 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4686 }
4688 mutex_exit(&ixgbe->gen_lock);
4690 result = DDI_INTR_CLAIMED;
4691 } else {
4692 mutex_exit(&ixgbe->gen_lock);
4694 /*
4695 * No interrupt cause bits set: don't claim this interrupt.
4696 */
4697 result = DDI_INTR_UNCLAIMED;
4698 }
4700 /* re-enable the interrupts which were automasked */
4701 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4703 /*
4704 * Do the following work outside of the gen_lock
4705 */
4706 if (mp != NULL) {
4707 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
4708 rx_ring->ring_gen_num);
4709 }
4711 if (tx_reschedule) {
4712 tx_ring->reschedule = B_FALSE;
4713 mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
4714 tx_ring->stat_reschedule++;
4715 }
4717 return (result);
4718 }
4720 /*
4721 * ixgbe_intr_msi - Interrupt handler for MSI.
4722 */
4723 static uint_t
4724 ixgbe_intr_msi(void *arg1, void *arg2)
4725 {
4726 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4727 struct ixgbe_hw *hw = &ixgbe->hw;
4728 uint32_t eicr;
4730 _NOTE(ARGUNUSED(arg2));
4732 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4734 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4735 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4736 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4737 return (DDI_INTR_CLAIMED);
4738 }
4740 /*
4741 * For MSI interrupt, we have only one vector,
4742 * so we have only one rx ring and one tx ring enabled.
4743 */
4744 ASSERT(ixgbe->num_rx_rings == 1);
4745 ASSERT(ixgbe->num_tx_rings == 1);
4747 /*
4748 * For MSI interrupt, rx rings[0] will use RTxQ[0].
4749 */
4750 if (eicr & 0x1) {
4751 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4752 }
4754 /*
4755 * For MSI interrupt, tx rings[0] will use RTxQ[1].
4756 */
4757 if (eicr & 0x2) {
4758 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4759 }
4761 /* any interrupt type other than tx/rx */
4762 if (eicr & ixgbe->capab->other_intr) {
4763 mutex_enter(&ixgbe->gen_lock);
4764 switch (hw->mac.type) {
4765 case ixgbe_mac_82598EB:
4766 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4767 break;
4769 case ixgbe_mac_82599EB:
4770 case ixgbe_mac_X540:
4771 case ixgbe_mac_X550:
4772 case ixgbe_mac_X550EM_x:
4773 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4774 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4775 break;
4777 default:
4778 break;
4779 }
4780 ixgbe_intr_other_work(ixgbe, eicr);
4781 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4782 mutex_exit(&ixgbe->gen_lock);
4783 }
4785 /* re-enable the interrupts which were automasked */
4786 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4788 return (DDI_INTR_CLAIMED);
4789 }
4791 /*
4792 * ixgbe_intr_msix - Interrupt handler for MSI-X.
4793 */
4794 static uint_t
4795 ixgbe_intr_msix(void *arg1, void *arg2)
4796 {
4797 ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
4798 ixgbe_t *ixgbe = vect->ixgbe;
4799 struct ixgbe_hw *hw = &ixgbe->hw;
4800 uint32_t eicr;
4801 int r_idx = 0;
4803 _NOTE(ARGUNUSED(arg2));
4805 /*
4806 * Clean each rx ring that has its bit set in the map
4807 */
4808 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
4809 while (r_idx >= 0) {
4810 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
4811 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
4812 (ixgbe->num_rx_rings - 1));
4813 }
4815 /*
4816 * Clean each tx ring that has its bit set in the map
4817 */
4818 r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
4819 while (r_idx >= 0) {
4820 ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
4821 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
4822 (ixgbe->num_tx_rings - 1));
4823 }
4826 /*
4827 * Clean other interrupt (link change) that has its bit set in the map
4828 */
4829 if (BT_TEST(vect->other_map, 0) == 1) {
4830 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4832 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4833 DDI_FM_OK) {
4834 ddi_fm_service_impact(ixgbe->dip,
4835 DDI_SERVICE_DEGRADED);
4836 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4837 return (DDI_INTR_CLAIMED);
4838 }
4840 /*
4841 * Check "other" cause bits: any interrupt type other than tx/rx
4842 */
4843 if (eicr & ixgbe->capab->other_intr) {
4844 mutex_enter(&ixgbe->gen_lock);
4845 switch (hw->mac.type) {
4846 case ixgbe_mac_82598EB:
4847 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4848 ixgbe_intr_other_work(ixgbe, eicr);
4849 break;
4851 case ixgbe_mac_82599EB:
4852 case ixgbe_mac_X540:
4853 case ixgbe_mac_X550:
4854 case ixgbe_mac_X550EM_x:
4855 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4856 ixgbe_intr_other_work(ixgbe, eicr);
4857 break;
4859 default:
4860 break;
4861 }
4862 mutex_exit(&ixgbe->gen_lock);
4863 }
4865 /* re-enable the interrupts which were automasked */
4866 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4867 }
4869 return (DDI_INTR_CLAIMED);
4870 }
4872 /*
4873 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4874 *
4875 * Normal sequence is to try MSI-X; if not successful, try MSI;
4876 * if not successful, try Legacy.
4877 * ixgbe->intr_force can be used to force sequence to start with
4878 * any of the 3 types.
4879 * If MSI-X is not used, number of tx/rx rings is forced to 1.
4880 */
4881 static int
4882 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
4883 {
4884 dev_info_t *devinfo;
4885 int intr_types;
4886 int rc;
4888 devinfo = ixgbe->dip;
4890 /*
4891 * Get supported interrupt types
4892 */
4893 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4895 if (rc != DDI_SUCCESS) {
4896 ixgbe_log(ixgbe,
4897 "Get supported interrupt types failed: %d", rc);
4898 return (IXGBE_FAILURE);
4899 }
4900 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
4902 ixgbe->intr_type = 0;
4904 /*
4905 * Install MSI-X interrupts
4906 */
4907 if ((intr_types & DDI_INTR_TYPE_MSIX) &&
4908 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
4909 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
4910 if (rc == IXGBE_SUCCESS)
4911 return (IXGBE_SUCCESS);
4913 ixgbe_log(ixgbe,
4914 "Allocate MSI-X failed, trying MSI interrupts...");
4915 }
4917 /*
4918 * MSI-X not used, force rings and groups to 1
4919 */
4920 ixgbe->num_rx_rings = 1;
4921 ixgbe->num_rx_groups = 1;
4922 ixgbe->num_tx_rings = 1;
4923 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
4924 ixgbe_log(ixgbe,
4925 "MSI-X not used, force rings and groups number to 1");
4927 /*
4928 * Install MSI interrupts
4929 */
4930 if ((intr_types & DDI_INTR_TYPE_MSI) &&
4931 (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
4932 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
4933 if (rc == IXGBE_SUCCESS)
4934 return (IXGBE_SUCCESS);
4936 ixgbe_log(ixgbe,
4937 "Allocate MSI failed, trying Legacy interrupts...");
4938 }
4940 /*
4941 * Install legacy interrupts
4942 */
4943 if (intr_types & DDI_INTR_TYPE_FIXED) {
4944 /*
4945 * Disallow legacy interrupts for X550. X550 has a silicon
4946 * bug which prevents Shared Legacy interrupts from working.
4947 * For details, please reference:
4948 *
4949 * Intel Ethernet Controller X550 Specification Update rev. 2.1
4950 * May 2016, erratum 22: PCIe Interrupt Status Bit
4951 */
4952 if (ixgbe->hw.mac.type == ixgbe_mac_X550 ||
4953 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x ||
4954 ixgbe->hw.mac.type == ixgbe_mac_X550_vf ||
4955 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x_vf) {
4956 ixgbe_log(ixgbe,
4957 "Legacy interrupts are not supported on this "
4958 "adapter. Please use MSI or MSI-X instead.");
4959 return (IXGBE_FAILURE);
4960 }
4961 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
4962 if (rc == IXGBE_SUCCESS)
4963 return (IXGBE_SUCCESS);
4965 ixgbe_log(ixgbe,
4966 "Allocate Legacy interrupts failed");
4967 }
4969 /*
4970 * If none of the 3 types succeeded, return failure
4971 */
4972 return (IXGBE_FAILURE);
4973 }
4975 /*
4976 * ixgbe_alloc_intr_handles - Allocate interrupt handles.
4977 *
4978 * For legacy and MSI, only 1 handle is needed. For MSI-X,
4979 * if fewer than 2 handles are available, return failure.
4980 * Upon success, this maps the vectors to rx and tx rings for
4981 * interrupts.
4982 */
4983 static int
4984 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
4985 {
4986 dev_info_t *devinfo;
4987 int request, count, actual;
4988 int minimum;
4989 int rc;
4990 uint32_t ring_per_group;
4992 devinfo = ixgbe->dip;
4994 switch (intr_type) {
4995 case DDI_INTR_TYPE_FIXED:
4996 request = 1; /* Request 1 legacy interrupt handle */
4997 minimum = 1;
4998 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
4999 break;
5001 case DDI_INTR_TYPE_MSI:
5002 request = 1; /* Request 1 MSI interrupt handle */
5003 minimum = 1;
5004 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
5005 break;
5007 case DDI_INTR_TYPE_MSIX:
5008 /*
5009 * The ideal number of vectors for the adapter is
5010 * (# rx rings + # tx rings); however, we cap the
5011 * number we request.
5012 */
5013 request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings);
5014 if (request > ixgbe->capab->max_ring_vect)
5015 request = ixgbe->capab->max_ring_vect;
5016 minimum = 1;
5017 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
5018 break;
5020 default:
5021 ixgbe_log(ixgbe,
5022 "invalid call to ixgbe_alloc_intr_handles(): %d\n",
5023 intr_type);
5024 return (IXGBE_FAILURE);
5025 }
5026 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d",
5027 request, minimum);
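/*
 * Worked example (hypothetical ring counts): with 8 rx rings, 8 tx
 * rings and a max_ring_vect of 16, request = min(16, 8 + 8) = 16;
 * with 4 rx and 4 tx rings, request = min(16, 8) = 8.  The minimum of
 * 1 still lets the driver come up with a single shared vector.
 */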
5029 /*
5030 * Get number of supported interrupts
5031 */
5032 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
5033 if ((rc != DDI_SUCCESS) || (count < minimum)) {
5034 ixgbe_log(ixgbe,
5035 "Get interrupt number failed. Return: %d, count: %d",
5036 rc, count);
5037 return (IXGBE_FAILURE);
5038 }
5039 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
5041 actual = 0;
5042 ixgbe->intr_cnt = 0;
5043 ixgbe->intr_cnt_max = 0;
5044 ixgbe->intr_cnt_min = 0;
5046 /*
5047 * Allocate an array of interrupt handles
5048 */
5049 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
5050 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
5052 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
5053 request, &actual, DDI_INTR_ALLOC_NORMAL);
5054 if (rc != DDI_SUCCESS) {
5055 ixgbe_log(ixgbe, "Allocate interrupts failed. "
5056 "return: %d, request: %d, actual: %d",
5057 rc, request, actual);
5058 goto alloc_handle_fail;
5059 }
5060 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
5062 /*
5063 * upper/lower limit of interrupts
5064 */
5065 ixgbe->intr_cnt = actual;
5066 ixgbe->intr_cnt_max = request;
5067 ixgbe->intr_cnt_min = minimum;
5069 /*
5070 * The RSS ring count per group must not exceed the number of
5071 * rx interrupts; otherwise the rx ring count is adjusted down.
5072 */
5073 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
5074 ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0);
5075 if (actual < ring_per_group) {
5076 ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual;
5077 ixgbe_setup_vmdq_rss_conf(ixgbe);
5078 }
5080 /*
5081 * Now we know the actual number of vectors. Map the "other"
5082 * cause and the rx/tx rings to those vectors.
5083 */
5084 if (actual < minimum) {
5085 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
5086 actual);
5087 goto alloc_handle_fail;
5088 }
5090 /*
5091 * Get priority for first vector, assume remaining are all the same
5092 */
5093 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
5094 if (rc != DDI_SUCCESS) {
5095 ixgbe_log(ixgbe,
5096 "Get interrupt priority failed: %d", rc);
5097 goto alloc_handle_fail;
5098 }
5100 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
5101 if (rc != DDI_SUCCESS) {
5102 ixgbe_log(ixgbe,
5103 "Get interrupt cap failed: %d", rc);
5104 goto alloc_handle_fail;
5105 }
5107 ixgbe->intr_type = intr_type;
5109 return (IXGBE_SUCCESS);
5111 alloc_handle_fail:
5112 ixgbe_rem_intrs(ixgbe);
5114 return (IXGBE_FAILURE);
5115 }
5117 /*
5118 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
5119 *
5120 * Before adding the interrupt handlers, the interrupt vectors have
5121 * been allocated, and the rx/tx rings have also been allocated.
5122 */
5123 static int
5124 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
5125 {
5126 int vector = 0;
5127 int rc;
5129 switch (ixgbe->intr_type) {
5130 case DDI_INTR_TYPE_MSIX:
5131 /*
5132 * Add interrupt handler for all vectors
5133 */
5134 for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
5135 /*
5136 * install pointer to vect_map[vector]
5137 */
5138 rc = ddi_intr_add_handler(ixgbe->htable[vector],
5139 (ddi_intr_handler_t *)ixgbe_intr_msix,
5140 (void *)&ixgbe->vect_map[vector], NULL);
5142 if (rc != DDI_SUCCESS) {
5143 ixgbe_log(ixgbe,
5144 "Add interrupt handler failed. "
5145 "return: %d, vector: %d", rc, vector);
5146 for (vector--; vector >= 0; vector--) {
5147 (void) ddi_intr_remove_handler(
5148 ixgbe->htable[vector]);
5149 }
5150 return (IXGBE_FAILURE);
5151 }
5152 }
5154 break;
5156 case DDI_INTR_TYPE_MSI:
5157 /*
5158 * Add interrupt handlers for the only vector
5159 */
5160 rc = ddi_intr_add_handler(ixgbe->htable[vector],
5161 (ddi_intr_handler_t *)ixgbe_intr_msi,
5162 (void *)ixgbe, NULL);
5164 if (rc != DDI_SUCCESS) {
5165 ixgbe_log(ixgbe,
5166 "Add MSI interrupt handler failed: %d", rc);
5167 return (IXGBE_FAILURE);
5168 }
5170 break;
5172 case DDI_INTR_TYPE_FIXED:
5173 /*
5174 * Add interrupt handlers for the only vector
5175 */
5176 rc = ddi_intr_add_handler(ixgbe->htable[vector],
5177 (ddi_intr_handler_t *)ixgbe_intr_legacy,
5178 (void *)ixgbe, NULL);
5180 if (rc != DDI_SUCCESS) {
5181 ixgbe_log(ixgbe,
5182 "Add legacy interrupt handler failed: %d", rc);
5183 return (IXGBE_FAILURE);
5184 }
5186 break;
5188 default:
5189 return (IXGBE_FAILURE);
5190 }
5192 return (IXGBE_SUCCESS);
5193 }
5195 #pragma inline(ixgbe_map_rxring_to_vector)
5196 /*
5197 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
5198 */
5199 static void
5200 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
5201 {
5202 /*
5203 * Set bit in map
5204 */
5205 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
5207 /*
5208 * Count bits set
5209 */
5210 ixgbe->vect_map[v_idx].rxr_cnt++;
5212 /*
5213 * Remember bit position
5214 */
5215 ixgbe->rx_rings[r_idx].intr_vector = v_idx;
5216 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
5217 }
5219 #pragma inline(ixgbe_map_txring_to_vector)
5220 /*
5221 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
5222 */
5223 static void
5224 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
5225 {
5226 /*
5227 * Set bit in map
5228 */
5229 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
5231 /*
5232 * Count bits set
5233 */
5234 ixgbe->vect_map[v_idx].txr_cnt++;
5236 /*
5237 * Remember bit position
5238 */
5239 ixgbe->tx_rings[t_idx].intr_vector = v_idx;
5240 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
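/*
 * Example of the resulting state (illustrative): mapping tx ring 2 to
 * vector 1 sets bit 2 in vect_map[1].tx_map, bumps txr_cnt, and
 * records intr_vector = 1 and vect_bit = (1 << 1) = 0x2 on the ring
 * so the tx path can later identify its vector.
 */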
5241 }
5243 /*
5244 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
5245 * allocation register (IVAR).
5246 * cause:
5247 * -1 : other cause
5248 * 0 : rx
5249 * 1 : tx
5250 */
5251 static void
5252 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
5253 int8_t cause)
5254 {
5255 struct ixgbe_hw *hw = &ixgbe->hw;
5256 u32 ivar, index;
5258 switch (hw->mac.type) {
5259 case ixgbe_mac_82598EB:
5260 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5261 if (cause == -1) {
5262 cause = 0;
5263 }
5264 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
5265 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
5266 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
5267 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
5268 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
5269 break;
5271 case ixgbe_mac_82599EB:
5272 case ixgbe_mac_X540:
5273 case ixgbe_mac_X550:
5274 case ixgbe_mac_X550EM_x:
5275 if (cause == -1) {
5276 /* other causes */
5277 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5278 index = (intr_alloc_entry & 1) * 8;
5279 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5280 ivar &= ~(0xFF << index);
5281 ivar |= (msix_vector << index);
5282 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
5283 } else {
5284 /* tx or rx causes */
5285 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5286 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
5287 ivar = IXGBE_READ_REG(hw,
5288 IXGBE_IVAR(intr_alloc_entry >> 1));
5289 ivar &= ~(0xFF << index);
5290 ivar |= (msix_vector << index);
5291 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
5292 ivar);
5293 }
5294 break;
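/*
 * Worked example for the 82599-class layout above (illustrative): an
 * rx cause (cause = 0) for queue entry 5 lives in IVAR(5 >> 1) =
 * IVAR(2) at bit offset 16 * (5 & 1) + 8 * 0 = 16, i.e. the third
 * byte of the register; the tx cause (cause = 1) for the same entry
 * lands at offset 16 + 8 = 24.
 */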
5296 default:
5297 break;
5298 }
5299 }
5301 /*
5302 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of the
5303 * given interrupt vector allocation register (IVAR).
5304 * cause:
5305 * -1 : other cause
5306 * 0 : rx
5307 * 1 : tx
5308 */
5309 static void
5310 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
5311 {
5312 struct ixgbe_hw *hw = &ixgbe->hw;
5313 u32 ivar, index;
5315 switch (hw->mac.type) {
5316 case ixgbe_mac_82598EB:
5317 if (cause == -1) {
5318 cause = 0;
5319 }
5320 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
5321 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
5322 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
5323 (intr_alloc_entry & 0x3)));
5324 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
5325 break;
5327 case ixgbe_mac_82599EB:
5328 case ixgbe_mac_X540:
5329 case ixgbe_mac_X550:
5330 case ixgbe_mac_X550EM_x:
5331 if (cause == -1) {
5332 /* other causes */
5333 index = (intr_alloc_entry & 1) * 8;
5334 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5335 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
5336 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
5337 } else {
5338 /* tx or rx causes */
5339 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
5340 ivar = IXGBE_READ_REG(hw,
5341 IXGBE_IVAR(intr_alloc_entry >> 1));
5342 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
5343 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
5344 ivar);
5345 }
5346 break;
5348 default:
5349 break;
5350 }
5351 }
5353 /*
5354 * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of the
5355 * given interrupt vector allocation register (IVAR).
5356 * cause:
5357 * -1 : other cause
5358 * 0 : rx
5359 * 1 : tx
5360 */
5361 static void
5362 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
5363 {
5364 struct ixgbe_hw *hw = &ixgbe->hw;
5365 u32 ivar, index;
5367 switch (hw->mac.type) {
5368 case ixgbe_mac_82598EB:
5369 if (cause == -1) {
5370 cause = 0;
5371 }
5372 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
5373 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
5374 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
5375 (intr_alloc_entry & 0x3)));
5376 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
5377 break;
5379 case ixgbe_mac_82599EB:
5380 case ixgbe_mac_X540:
5381 case ixgbe_mac_X550:
5382 case ixgbe_mac_X550EM_x:
5383 if (cause == -1) {
5384 /* other causes */
5385 index = (intr_alloc_entry & 1) * 8;
5386 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5387 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
5388 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
5389 } else {
5390 /* tx or rx causes */
5391 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
5392 ivar = IXGBE_READ_REG(hw,
5393 IXGBE_IVAR(intr_alloc_entry >> 1));
5394 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
5395 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
5396 ivar);
5397 }
5398 break;
5400 default:
5401 break;
5402 }
5403 }
5405 /*
5406 * Convert the driver-maintained rx ring index to the rx ring index
5407 * used by the h/w.
5408 */
5409 static uint32_t
5410 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
5411 {
5413 struct ixgbe_hw *hw = &ixgbe->hw;
5414 uint32_t rx_ring_per_group, hw_rx_index;
5416 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
5417 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
5418 return (sw_rx_index);
5419 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
5420 switch (hw->mac.type) {
5421 case ixgbe_mac_82598EB:
5422 return (sw_rx_index);
5424 case ixgbe_mac_82599EB:
5425 case ixgbe_mac_X540:
5426 case ixgbe_mac_X550:
5427 case ixgbe_mac_X550EM_x:
5428 return (sw_rx_index * 2);
5430 default:
5431 break;
5432 }
5433 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
5434 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
5436 switch (hw->mac.type) {
5437 case ixgbe_mac_82598EB:
5438 hw_rx_index = (sw_rx_index / rx_ring_per_group) *
5439 16 + (sw_rx_index % rx_ring_per_group);
5440 return (hw_rx_index);
5442 case ixgbe_mac_82599EB:
5443 case ixgbe_mac_X540:
5444 case ixgbe_mac_X550:
5445 case ixgbe_mac_X550EM_x:
5446 if (ixgbe->num_rx_groups > 32) {
5447 hw_rx_index = (sw_rx_index /
5448 rx_ring_per_group) * 2 +
5449 (sw_rx_index % rx_ring_per_group);
5450 } else {
5451 hw_rx_index = (sw_rx_index /
5452 rx_ring_per_group) * 4 +
5453 (sw_rx_index % rx_ring_per_group);
5454 }
5455 return (hw_rx_index);
5457 default:
5458 break;
5459 }
5460 }
5462 /*
5463 * Should never be reached; just to make the compiler happy.
5464 */
5465 return (sw_rx_index);
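/*
 * Worked example (hypothetical configuration): in VMDq+RSS mode on an
 * 82599 with 16 groups of 2 rings each (<= 32 groups, so the
 * multiplier is 4), sw ring 5 is the second ring of group 2, giving
 * hw_rx_index = (5 / 2) * 4 + (5 % 2) = 9.
 */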
5466 }
5468 /*
5469 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
5470 *
5471 * For MSI-X, this maps the rx, tx and "other" interrupts to
5472 * vectors [0 .. intr_cnt - 1].
5473 */
5474 static int
5475 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
5476 {
5477 int i, vector = 0;
5479 /* initialize vector map */
5480 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
5481 for (i = 0; i < ixgbe->intr_cnt; i++) {
5482 ixgbe->vect_map[i].ixgbe = ixgbe;
5483 }
5485 /*
5486 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
5487 * tx rings[0] on RTxQ[1].
5488 */
5489 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5490 ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
5491 ixgbe_map_txring_to_vector(ixgbe, 0, 1);
5492 return (IXGBE_SUCCESS);
5493 }
5495 /*
5496 * Interrupts/vectors mapping for MSI-X
5497 */
5499 /*
5500 * Map other interrupt to vector 0,
5501 * Set bit in map and count the bits set.
5502 */
5503 BT_SET(ixgbe->vect_map[vector].other_map, 0);
5504 ixgbe->vect_map[vector].other_cnt++;
5506 /*
5507 * Map rx ring interrupts to vectors
5508 */
5509 for (i = 0; i < ixgbe->num_rx_rings; i++) {
5510 ixgbe_map_rxring_to_vector(ixgbe, i, vector);
5511 vector = (vector + 1) % ixgbe->intr_cnt;
5512 }
5514 /*
5515 * Map tx ring interrupts to vectors
5516 */
5517 for (i = 0; i < ixgbe->num_tx_rings; i++) {
5518 ixgbe_map_txring_to_vector(ixgbe, i, vector);
5519 vector = (vector + 1) % ixgbe->intr_cnt;
5520 }
5522 return (IXGBE_SUCCESS);
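/*
 * Worked example (hypothetical counts): with intr_cnt = 4, 2 rx rings
 * and 2 tx rings, the "other" cause shares vector 0 and the loops
 * above yield rx0 -> v0, rx1 -> v1, tx0 -> v2, tx1 -> v3; with more
 * rings than vectors, the modulo wraps the assignment around.
 */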
5523 }
5525 /*
5526 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
5527 *
5528 * This relies on ring/vector mapping already set up in the
5529 * vect_map[] structures
5530 */
5531 static void
5532 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
5533 {
5534 struct ixgbe_hw *hw = &ixgbe->hw;
5535 ixgbe_intr_vector_t *vect; /* vector bitmap */
5536 int r_idx; /* ring index */
5537 int v_idx; /* vector index */
5538 uint32_t hw_index;
5540 /*
5541 * Clear any previous entries
5542 */
5543 switch (hw->mac.type) {
5544 case ixgbe_mac_82598EB:
5545 for (v_idx = 0; v_idx < 25; v_idx++)
5546 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5547 break;
5549 case ixgbe_mac_82599EB:
5550 case ixgbe_mac_X540:
5551 case ixgbe_mac_X550:
5552 case ixgbe_mac_X550EM_x:
5553 for (v_idx = 0; v_idx < 64; v_idx++)
5554 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5555 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5556 break;
5558 default:
5559 break;
5560 }
5562 /*
5563 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
5564 * tx rings[0] will use RTxQ[1].
5565 */
5566 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5567 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5568 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5569 return;
5570 }
5572 /*
5573 * For MSI-X interrupt, "Other" is always on vector[0].
5574 */
5575 ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);
5577 /*
5578 * For each interrupt vector, populate the IVAR table
5579 */
5580 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
5581 vect = &ixgbe->vect_map[v_idx];
5583 /*
5584 * For each rx ring bit set
5585 */
5586 r_idx = bt_getlowbit(vect->rx_map, 0,
5587 (ixgbe->num_rx_rings - 1));
5589 while (r_idx >= 0) {
5590 hw_index = ixgbe->rx_rings[r_idx].hw_index;
5591 ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0);
5592 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
5593 (ixgbe->num_rx_rings - 1));
5594 }
5596 /*
5597 * For each tx ring bit set
5598 */
5599 r_idx = bt_getlowbit(vect->tx_map, 0,
5600 (ixgbe->num_tx_rings - 1));
5602 while (r_idx >= 0) {
5603 ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
5604 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
5605 (ixgbe->num_tx_rings - 1));
5606 }
5607 }
5608 }
5610 /*
5611 * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
5612 */
5613 static void
5614 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
5615 {
5616 int i;
5617 int rc;
5619 for (i = 0; i < ixgbe->intr_cnt; i++) {
5620 rc = ddi_intr_remove_handler(ixgbe->htable[i]);
5621 if (rc != DDI_SUCCESS) {
5622 IXGBE_DEBUGLOG_1(ixgbe,
5623 "Remove intr handler failed: %d", rc);
5624 }
5625 }
5626 }
5628 /*
5629 * ixgbe_rem_intrs - Remove the allocated interrupts.
5630 */
5631 static void
5632 ixgbe_rem_intrs(ixgbe_t *ixgbe)
5633 {
5634 int i;
5635 int rc;
5637 for (i = 0; i < ixgbe->intr_cnt; i++) {
5638 rc = ddi_intr_free(ixgbe->htable[i]);
5639 if (rc != DDI_SUCCESS) {
5640 IXGBE_DEBUGLOG_1(ixgbe,
5641 "Free intr failed: %d", rc);
5642 }
5643 }
5645 kmem_free(ixgbe->htable, ixgbe->intr_size);
5646 ixgbe->htable = NULL;
5647 }
5649 /*
5650 * ixgbe_enable_intrs - Enable all the ddi interrupts.
5651 */
5652 static int
5653 ixgbe_enable_intrs(ixgbe_t *ixgbe)
5654 {
5655 int i;
5656 int rc;
5658 /*
5659 * Enable interrupts
5660 */
5661 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5662 /*
5663 * Call ddi_intr_block_enable() for MSI
5664 */
5665 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
5666 if (rc != DDI_SUCCESS) {
5667 ixgbe_log(ixgbe,
5668 "Enable block intr failed: %d", rc);
5669 return (IXGBE_FAILURE);
5670 }
5671 } else {
5672 /*
5673 * Call ddi_intr_enable() for Legacy/MSI non block enable
5674 */
5675 for (i = 0; i < ixgbe->intr_cnt; i++) {
5676 rc = ddi_intr_enable(ixgbe->htable[i]);
5677 if (rc != DDI_SUCCESS) {
5678 ixgbe_log(ixgbe,
5679 "Enable intr failed: %d", rc);
5680 return (IXGBE_FAILURE);
5681 }
5682 }
5683 }
5685 return (IXGBE_SUCCESS);
5686 }
5688 /*
5689 * ixgbe_disable_intrs - Disable all the interrupts.
5690 */
5691 static int
5692 ixgbe_disable_intrs(ixgbe_t *ixgbe)
5693 {
5694 int i;
5695 int rc;
5697 /*
5698 * Disable all interrupts
5699 */
5700 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5701 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
5702 if (rc != DDI_SUCCESS) {
5703 ixgbe_log(ixgbe,
5704 "Disable block intr failed: %d", rc);
5705 return (IXGBE_FAILURE);
5706 }
5707 } else {
5708 for (i = 0; i < ixgbe->intr_cnt; i++) {
5709 rc = ddi_intr_disable(ixgbe->htable[i]);
5710 if (rc != DDI_SUCCESS) {
5711 ixgbe_log(ixgbe,
5712 "Disable intr failed: %d", rc);
5713 return (IXGBE_FAILURE);
5714 }
5715 }
5716 }
5718 return (IXGBE_SUCCESS);
5719 }
5721 /*
5722 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
5723 */
5724 static void
5725 ixgbe_get_hw_state(ixgbe_t *ixgbe)
5726 {
5727 struct ixgbe_hw *hw = &ixgbe->hw;
5728 ixgbe_link_speed speed = 0;
5729 bool link_up = B_FALSE;
5730 uint32_t pcs1g_anlp = 0;
5732 ASSERT(mutex_owned(&ixgbe->gen_lock));
5733 ixgbe->param_lp_1000fdx_cap = 0;
5734 ixgbe->param_lp_100fdx_cap = 0;
5736 /* check for link, don't wait */
5737 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);
5739 /*
5740 * Update the observed Link Partner's capabilities. Not all adapters
5741 * can provide full information on the LP's capable speeds, so we
5742 * provide what we can.
5743 */
5744 if (link_up) {
5745 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
5747 ixgbe->param_lp_1000fdx_cap =
5748 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5749 ixgbe->param_lp_100fdx_cap =
5750 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5751 }
5753 /*
5754 * Update GLD's notion of the adapter's currently advertised speeds.
5755 * Since the common code doesn't always record the current autonegotiate
5756 * settings in the phy struct for all parts (specifically, adapters with
5757 * SFPs) we first test to see if it is 0, and if so, we fall back to
5758 * using the adapter's speed capabilities which we saved during instance
5759 * init in ixgbe_init_params().
5760 *
5761 * Adapters with SFPs will always be shown as advertising all of their
5762 * supported speeds, and adapters with baseT PHYs (where the phy struct
5763 * is maintained by the common code) will always have a factual view of
5764 * their currently-advertised speeds. In the case of SFPs, this is
5765 * acceptable as we default to advertising all speeds that the adapter
5766 * claims to support, and those properties are immutable; unlike on
5767 * baseT (copper) PHYs, where speeds can be enabled or disabled at will.
5768 */
5769 speed = hw->phy.autoneg_advertised;
5770 if (speed == 0)
5771 speed = ixgbe->speeds_supported;
5773 ixgbe->param_adv_10000fdx_cap =
5774 (speed & IXGBE_LINK_SPEED_10GB_FULL) ? 1 : 0;
5775 ixgbe->param_adv_5000fdx_cap =
5776 (speed & IXGBE_LINK_SPEED_5GB_FULL) ? 1 : 0;
5777 ixgbe->param_adv_2500fdx_cap =
5778 (speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 1 : 0;
5779 ixgbe->param_adv_1000fdx_cap =
5780 (speed & IXGBE_LINK_SPEED_1GB_FULL) ? 1 : 0;
5781 ixgbe->param_adv_100fdx_cap =
5782 (speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0;
5783 }
5785 /*
5786 * ixgbe_get_driver_control - Notify that driver is in control of device.
5787 */
5788 static void
5789 ixgbe_get_driver_control(struct ixgbe_hw *hw)
5790 {
5791 uint32_t ctrl_ext;
5793 /*
5794 * Notify firmware that driver is in control of device
5795 */
5796 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5797 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
5798 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5799 }
5801 /*
5802 * ixgbe_release_driver_control - Notify that driver is no longer in control
5803 * of device.
5804 */
5805 static void
5806 ixgbe_release_driver_control(struct ixgbe_hw *hw)
5807 {
5808 uint32_t ctrl_ext;
5810 /*
5811 * Notify firmware that driver is no longer in control of device
5812 */
5813 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5814 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
5815 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5816 }
5818 /*
5819 * ixgbe_atomic_reserve - Atomic decrease operation.
5820 */
5821 int
5822 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
5823 {
5824 uint32_t oldval;
5825 uint32_t newval;
5827 /*
5828 * ATOMICALLY
5829 */
5830 do {
5831 oldval = *count_p;
5832 if (oldval < n)
5833 return (-1);
5834 newval = oldval - n;
5835 } while (atomic_cas_32(count_p, oldval, newval) != oldval);
5837 return (newval);
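/*
 * Usage sketch (illustrative; a tx path reserving descriptors from a
 * free count such as tbd_free is the sort of expected caller):
 *
 *	if (ixgbe_atomic_reserve(&free_count, needed) < 0)
 *		fail: fewer than "needed" entries remained;
 *
 * The CAS loop retries only if another thread changed the count
 * between the read and the swap, so the count can never be driven
 * below zero.
 */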
5838 }
5840 /*
5841 * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
5842 */
5843 static uint8_t *
5844 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
5845 {
5846 uint8_t *addr = *upd_ptr;
5847 uint8_t *new_ptr;
5849 _NOTE(ARGUNUSED(hw));
5850 _NOTE(ARGUNUSED(vmdq));
5852 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
5853 *upd_ptr = new_ptr;
5854 return (addr);
5855 }
5857 /*
5858 * FMA support
5859 */
5860 int
5861 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
5862 {
5863 ddi_fm_error_t de;
5865 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5866 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5867 return (de.fme_status);
5868 }
5870 int
5871 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
5872 {
5873 ddi_fm_error_t de;
5875 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5876 return (de.fme_status);
5877 }
5879 /*
5880 * ixgbe_fm_error_cb - The IO fault service error handling callback function.
5881 */
5882 static int
5883 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5884 {
5885 _NOTE(ARGUNUSED(impl_data));
5886 /*
5887 * as the driver can always deal with an error in any dma or
5888 * access handle, we can just return the fme_status value.
5889 */
5890 pci_ereport_post(dip, err, NULL);
5891 return (err->fme_status);
5892 }
5894 static void
5895 ixgbe_fm_init(ixgbe_t *ixgbe)
5896 {
5897 ddi_iblock_cookie_t iblk;
5898 int fma_dma_flag;
5900 /*
5901 * Only register with IO Fault Services if we have some capability
5902 */
5903 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5904 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5905 } else {
5906 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5907 }
5909 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5910 fma_dma_flag = 1;
5911 } else {
5912 fma_dma_flag = 0;
5913 }
5915 ixgbe_set_fma_flags(fma_dma_flag);
5917 if (ixgbe->fm_capabilities) {
5919 /*
5920 * Register capabilities with IO Fault Services
5921 */
5922 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
5924 /*
5925 * Initialize pci ereport capabilities if ereport capable
5926 */
5927 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5928 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5929 pci_ereport_setup(ixgbe->dip);
5931 /*
5932 * Register error callback if error callback capable
5933 */
5934 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5935 ddi_fm_handler_register(ixgbe->dip,
5936 ixgbe_fm_error_cb, (void*) ixgbe);
5937 }
5938 }
5940 static void
5941 ixgbe_fm_fini(ixgbe_t *ixgbe)
5942 {
5943 /*
5944 * Only unregister FMA capabilities if they are registered
5945 */
5946 if (ixgbe->fm_capabilities) {
5948 /*
5949 * Release any resources allocated by pci_ereport_setup()
5950 */
5951 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5952 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5953 pci_ereport_teardown(ixgbe->dip);
5955 /*
5956 * Un-register error callback if error callback capable
5957 */
5958 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5959 ddi_fm_handler_unregister(ixgbe->dip);
5961 /*
5962 * Unregister from IO Fault Service
5963 */
5964 ddi_fm_fini(ixgbe->dip);
5965 }
5966 }
5968 void
5969 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
5970 {
5971 uint64_t ena;
5972 char buf[FM_MAX_CLASS];
5974 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
5975 ena = fm_ena_generate(0, FM_ENA_FMT1);
5976 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
5977 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
5978 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
5979 }
5980 }
5982 static int
5983 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
5984 {
5985 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
5987 mutex_enter(&rx_ring->rx_lock);
5988 rx_ring->ring_gen_num = mr_gen_num;
5989 mutex_exit(&rx_ring->rx_lock);
5990 return (0);
5991 }
5993 /*
5994 * Get the global ring index by a ring index within a group.
5995 */
5996 static int
5997 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex)
5998 {
5999 ixgbe_rx_ring_t *rx_ring;
6000 int i;
6002 for (i = 0; i < ixgbe->num_rx_rings; i++) {
6003 rx_ring = &ixgbe->rx_rings[i];
6004 if (rx_ring->group_index == gindex)
6005 rindex--;
6006 if (rindex < 0)
6007 return (i);
6008 }
6010 return (-1);
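/*
 * Worked example (illustrative): with rings assigned to groups as
 * {g0, g0, g1, g1}, a lookup of (gindex = 1, rindex = 1) decrements
 * rindex at global indexes 2 and 3, goes negative at index 3, and so
 * returns 3.
 */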
6011 }
6013 /*
6014 * Callback function for MAC layer to register all rings.
6015 */
6016 /* ARGSUSED */
6017 void
6018 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
6019 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
6020 {
6021 ixgbe_t *ixgbe = (ixgbe_t *)arg;
6022 mac_intr_t *mintr = &infop->mri_intr;
6024 switch (rtype) {
6025 case MAC_RING_TYPE_RX: {
6026 /*
6027 * 'index' is the ring index within the group.
6028 * Need to get the global ring index by searching in groups.
6029 */
6030 int global_ring_index = ixgbe_get_rx_ring_index(
6031 ixgbe, group_index, ring_index);
6033 ASSERT(global_ring_index >= 0);
6035 ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index];
6036 rx_ring->ring_handle = rh;
6038 infop->mri_driver = (mac_ring_driver_t)rx_ring;
6039 infop->mri_start = ixgbe_ring_start;
6040 infop->mri_stop = NULL;
6041 infop->mri_poll = ixgbe_ring_rx_poll;
6042 infop->mri_stat = ixgbe_rx_ring_stat;
6044 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
6045 mintr->mi_enable = ixgbe_rx_ring_intr_enable;
6046 mintr->mi_disable = ixgbe_rx_ring_intr_disable;
6047 if (ixgbe->intr_type &
6048 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
6049 mintr->mi_ddi_handle =
6050 ixgbe->htable[rx_ring->intr_vector];
6051 }
6053 break;
6054 }
6055 case MAC_RING_TYPE_TX: {
6056 ASSERT(group_index == -1);
6057 ASSERT(ring_index < ixgbe->num_tx_rings);
6059 ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
6060 tx_ring->ring_handle = rh;
6062 infop->mri_driver = (mac_ring_driver_t)tx_ring;
6063 infop->mri_start = NULL;
6064 infop->mri_stop = NULL;
6065 infop->mri_tx = ixgbe_ring_tx;
6066 infop->mri_stat = ixgbe_tx_ring_stat;
6067 if (ixgbe->intr_type &
6068 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
6069 mintr->mi_ddi_handle =
6070 ixgbe->htable[tx_ring->intr_vector];
6071 }
6072 break;
6073 }
6074 default:
6075 break;
6076 }
6077 }
6079 /*
6080 * Callback function for MAC layer to register all groups.
6081 */
6082 void
6083 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
6084 mac_group_info_t *infop, mac_group_handle_t gh)
6085 {
6086 ixgbe_t *ixgbe = (ixgbe_t *)arg;
6088 switch (rtype) {
6089 case MAC_RING_TYPE_RX: {
6090 ixgbe_rx_group_t *rx_group;
6092 rx_group = &ixgbe->rx_groups[index];
6093 rx_group->group_handle = gh;
6095 infop->mgi_driver = (mac_group_driver_t)rx_group;
6096 infop->mgi_start = NULL;
6097 infop->mgi_stop = NULL;
6098 infop->mgi_addmac = ixgbe_addmac;
6099 infop->mgi_remmac = ixgbe_remmac;
6100 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
6102 break;
6103 }
6104 case MAC_RING_TYPE_TX:
6105 break;
6106 default:
6107 break;
6108 }
6109 }
6111 /*
6112 * Enable interrupt on the specified rx ring.
6113 */
6114 int
6115 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
6116 {
6117 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
6118 ixgbe_t *ixgbe = rx_ring->ixgbe;
6119 int r_idx = rx_ring->index;
6120 int hw_r_idx = rx_ring->hw_index;
6121 int v_idx = rx_ring->intr_vector;
6123 mutex_enter(&ixgbe->gen_lock);
6124 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
6125 mutex_exit(&ixgbe->gen_lock);
6126 /*
6127 * Simply return 0.
6128 * Interrupts are being adjusted. ixgbe_intr_adjust()
6129 * will eventually re-enable the interrupt when it's
6130 * done with the adjustment.
6131 */
6132 return (0);
6133 }
6135 /*
6136 * Enable the interrupt by setting the VAL bit of the given
6137 * interrupt vector allocation register (IVAR).
6138 */
6139 ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);
6141 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
6143 /*
6144 * Trigger a Rx interrupt on this ring
6145 */
6146 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
6147 IXGBE_WRITE_FLUSH(&ixgbe->hw);
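/*
 * Note on the EICS write above (an inference, not from the original
 * comments): setting the vector's bit in EICS raises a software-
 * triggered interrupt right away, presumably so that packets which
 * arrived while the ring was being polled are processed without
 * waiting for new traffic.
 */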
6149 mutex_exit(&ixgbe->gen_lock);
6151 return (0);
6152 }
6154 /*
6155 * Disable interrupt on the specified rx ring.
6156 */
6157 int
6158 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
6159 {
6160 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
6161 ixgbe_t *ixgbe = rx_ring->ixgbe;
6162 int r_idx = rx_ring->index;
6163 int hw_r_idx = rx_ring->hw_index;
6164 int v_idx = rx_ring->intr_vector;
6166 mutex_enter(&ixgbe->gen_lock);
6167 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
6168 mutex_exit(&ixgbe->gen_lock);
6169 /*
6170 * Simply return 0.
6171 * In the rare case where an interrupt is being
6172 * disabled while interrupts are being adjusted,
6173 * we don't fail the operation. No interrupts will
6174 * be generated while they are adjusted, and
6175 * ixgbe_intr_adjust() will cause the interrupts
6176 * to be re-enabled once it completes. Note that
6177 * in this case, packets may be delivered to the
6178 * stack via interrupts before ixgbe_rx_ring_intr_enable()
6179 * is called again. This is acceptable since interrupt
6180 * adjustment is infrequent, and the stack will be
6181 * able to handle these packets.
6182 */
6183 return (0);
6184 }
6186 /*
6187 * Disable the interrupt by clearing the VAL bit of the given
6188 * interrupt vector allocation register (IVAR).
6189 */
6190 ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);
6192 BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
6194 mutex_exit(&ixgbe->gen_lock);
6196 return (0);
6197 }
6199 /*
6200 * Add a mac address.
6201 */
6202 static int
6203 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
6204 {
6205 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
6206 ixgbe_t *ixgbe = rx_group->ixgbe;
6207 struct ixgbe_hw *hw = &ixgbe->hw;
6208 int slot, i;
6210 mutex_enter(&ixgbe->gen_lock);
6212 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
6213 mutex_exit(&ixgbe->gen_lock);
6214 return (ECANCELED);
6215 }
6217 if (ixgbe->unicst_avail == 0) {
6218 /* no slots available */
6219 mutex_exit(&ixgbe->gen_lock);
6220 return (ENOSPC);
6221 }
6223 /*
6224 * The first ixgbe->num_rx_groups slots are reserved, one for each
6225 * respective group. The remaining slots are shared by all groups.
6226 * When adding a MAC address, the group's reserved slot is checked
6227 * first, then the shared slots are searched.
6228 */
6229 slot = -1;
6230 if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
6231 for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
6232 if (ixgbe->unicst_addr[i].mac.set == 0) {
6233 slot = i;
6234 break;
6235 }
6236 }
6237 } else {
6238 slot = rx_group->index;
6239 }
6241 if (slot == -1) {
6242 /* no slots available */
6243 mutex_exit(&ixgbe->gen_lock);
6244 return (ENOSPC);
6245 }
6247 bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
6248 (void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
6249 rx_group->index, IXGBE_RAH_AV);
6250 ixgbe->unicst_addr[slot].mac.set = 1;
6251 ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
6252 ixgbe->unicst_avail--;
6254 mutex_exit(&ixgbe->gen_lock);
6256 return (0);
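/*
 * Worked example (hypothetical sizes): with 4 rx groups and a
 * unicst_total of 128, slots 0-3 are the per-group reserved slots and
 * slots 4-127 are shared; a second address added to group 0 therefore
 * lands in the first free shared slot at index 4 or above.
 */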
6257 }
6259 /*
6260 * Remove a mac address.
6261 */
6262 static int
6263 ixgbe_remmac(void *arg, const uint8_t *mac_addr)
6264 {
6265 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
6266 ixgbe_t *ixgbe = rx_group->ixgbe;
6267 struct ixgbe_hw *hw = &ixgbe->hw;
6268 int slot;
6270 mutex_enter(&ixgbe->gen_lock);
6272 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
6273 mutex_exit(&ixgbe->gen_lock);
6274 return (ECANCELED);
6275 }
6277 slot = ixgbe_unicst_find(ixgbe, mac_addr);
6278 if (slot == -1) {
6279 mutex_exit(&ixgbe->gen_lock);
6280 return (EINVAL);
6281 }
6283 if (ixgbe->unicst_addr[slot].mac.set == 0) {
6284 mutex_exit(&ixgbe->gen_lock);
6285 return (EINVAL);
6286 }
6288 bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
6289 (void) ixgbe_clear_rar(hw, slot);
6290 ixgbe->unicst_addr[slot].mac.set = 0;
6291 ixgbe->unicst_avail++;
6293 mutex_exit(&ixgbe->gen_lock);
6295 return (0);