4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Copyright 2013, Nexenta Systems, Inc. All rights reserved.
29 * Copyright 2014 Pluribus Networks Inc.
35 igb_m_stat(void *arg
, uint_t stat
, uint64_t *val
)
37 igb_t
*igb
= (igb_t
*)arg
;
38 struct e1000_hw
*hw
= &igb
->hw
;
40 uint32_t low_val
, high_val
;
42 igb_ks
= (igb_stat_t
*)igb
->igb_ks
->ks_data
;
44 mutex_enter(&igb
->gen_lock
);
46 if (igb
->igb_state
& IGB_SUSPENDED
) {
47 mutex_exit(&igb
->gen_lock
);
52 case MAC_STAT_IFSPEED
:
53 *val
= igb
->link_speed
* 1000000ull;
56 case MAC_STAT_MULTIRCV
:
57 igb
->stat_mprc
+= E1000_READ_REG(hw
, E1000_MPRC
);
58 *val
= igb
->stat_mprc
;
61 case MAC_STAT_BRDCSTRCV
:
62 igb
->stat_bprc
+= E1000_READ_REG(hw
, E1000_BPRC
);
63 *val
= igb
->stat_bprc
;
66 case MAC_STAT_MULTIXMT
:
67 igb
->stat_mptc
+= E1000_READ_REG(hw
, E1000_MPTC
);
68 *val
= igb
->stat_mptc
;
71 case MAC_STAT_BRDCSTXMT
:
72 igb
->stat_bptc
+= E1000_READ_REG(hw
, E1000_BPTC
);
73 *val
= igb
->stat_bptc
;
76 case MAC_STAT_NORCVBUF
:
77 igb
->stat_rnbc
+= E1000_READ_REG(hw
, E1000_RNBC
);
78 *val
= igb
->stat_rnbc
;
81 case MAC_STAT_IERRORS
:
82 igb
->stat_rxerrc
+= E1000_READ_REG(hw
, E1000_RXERRC
);
83 igb
->stat_algnerrc
+= E1000_READ_REG(hw
, E1000_ALGNERRC
);
84 igb_ks
->rlec
.value
.ui64
+=
85 E1000_READ_REG(hw
, E1000_RLEC
);
86 igb
->stat_crcerrs
+= E1000_READ_REG(hw
, E1000_CRCERRS
);
87 igb
->stat_cexterr
+= E1000_READ_REG(hw
, E1000_CEXTERR
);
88 *val
= igb
->stat_rxerrc
+
90 igb_ks
->rlec
.value
.ui64
+
95 case MAC_STAT_NOXMTBUF
:
99 case MAC_STAT_OERRORS
:
100 igb
->stat_ecol
+= E1000_READ_REG(hw
, E1000_ECOL
);
101 *val
= igb
->stat_ecol
;
104 case MAC_STAT_COLLISIONS
:
105 igb
->stat_colc
+= E1000_READ_REG(hw
, E1000_COLC
);
106 *val
= igb
->stat_colc
;
109 case MAC_STAT_RBYTES
:
111 * The 64-bit register will reset whenever the upper
112 * 32 bits are read. So we need to read the lower
113 * 32 bits first, then read the upper 32 bits.
115 low_val
= E1000_READ_REG(hw
, E1000_TORL
);
116 high_val
= E1000_READ_REG(hw
, E1000_TORH
);
117 igb
->stat_tor
+= (uint64_t)high_val
<< 32 | (uint64_t)low_val
;
118 *val
= igb
->stat_tor
;
121 case MAC_STAT_IPACKETS
:
122 igb
->stat_tpr
+= E1000_READ_REG(hw
, E1000_TPR
);
123 *val
= igb
->stat_tpr
;
126 case MAC_STAT_OBYTES
:
128 * The 64-bit register will reset whenever the upper
129 * 32 bits are read. So we need to read the lower
130 * 32 bits first, then read the upper 32 bits.
132 low_val
= E1000_READ_REG(hw
, E1000_TOTL
);
133 high_val
= E1000_READ_REG(hw
, E1000_TOTH
);
134 igb
->stat_tot
+= (uint64_t)high_val
<< 32 | (uint64_t)low_val
;
135 *val
= igb
->stat_tot
;
138 case MAC_STAT_OPACKETS
:
139 igb
->stat_tpt
+= E1000_READ_REG(hw
, E1000_TPT
);
140 *val
= igb
->stat_tpt
;
144 case ETHER_STAT_ALIGN_ERRORS
:
145 igb
->stat_algnerrc
+= E1000_READ_REG(hw
, E1000_ALGNERRC
);
146 *val
= igb
->stat_algnerrc
;
149 case ETHER_STAT_FCS_ERRORS
:
150 igb
->stat_crcerrs
+= E1000_READ_REG(hw
, E1000_CRCERRS
);
151 *val
= igb
->stat_crcerrs
;
154 case ETHER_STAT_FIRST_COLLISIONS
:
155 igb
->stat_scc
+= E1000_READ_REG(hw
, E1000_SCC
);
156 *val
= igb
->stat_scc
;
159 case ETHER_STAT_MULTI_COLLISIONS
:
160 igb
->stat_mcc
+= E1000_READ_REG(hw
, E1000_MCC
);
161 *val
= igb
->stat_mcc
;
164 case ETHER_STAT_SQE_ERRORS
:
165 igb
->stat_sec
+= E1000_READ_REG(hw
, E1000_SEC
);
166 *val
= igb
->stat_sec
;
169 case ETHER_STAT_DEFER_XMTS
:
170 igb
->stat_dc
+= E1000_READ_REG(hw
, E1000_DC
);
174 case ETHER_STAT_TX_LATE_COLLISIONS
:
175 igb
->stat_latecol
+= E1000_READ_REG(hw
, E1000_LATECOL
);
176 *val
= igb
->stat_latecol
;
179 case ETHER_STAT_EX_COLLISIONS
:
180 igb
->stat_ecol
+= E1000_READ_REG(hw
, E1000_ECOL
);
181 *val
= igb
->stat_ecol
;
184 case ETHER_STAT_MACXMT_ERRORS
:
185 igb
->stat_ecol
+= E1000_READ_REG(hw
, E1000_ECOL
);
186 *val
= igb
->stat_ecol
;
189 case ETHER_STAT_CARRIER_ERRORS
:
190 igb
->stat_cexterr
+= E1000_READ_REG(hw
, E1000_CEXTERR
);
191 *val
= igb
->stat_cexterr
;
194 case ETHER_STAT_TOOLONG_ERRORS
:
195 igb
->stat_roc
+= E1000_READ_REG(hw
, E1000_ROC
);
196 *val
= igb
->stat_roc
;
199 case ETHER_STAT_MACRCV_ERRORS
:
200 igb
->stat_rxerrc
+= E1000_READ_REG(hw
, E1000_RXERRC
);
201 *val
= igb
->stat_rxerrc
;
205 case ETHER_STAT_XCVR_ADDR
:
206 /* The Internal PHY's MDI address for each MAC is 1 */
210 case ETHER_STAT_XCVR_ID
:
211 *val
= hw
->phy
.id
| hw
->phy
.revision
;
214 case ETHER_STAT_XCVR_INUSE
:
215 switch (igb
->link_speed
) {
218 (hw
->phy
.media_type
== e1000_media_type_copper
) ?
219 XCVR_1000T
: XCVR_1000X
;
223 (hw
->phy
.media_type
== e1000_media_type_copper
) ?
224 (igb
->param_100t4_cap
== 1) ?
225 XCVR_100T4
: XCVR_100T2
: XCVR_100X
;
236 case ETHER_STAT_CAP_1000FDX
:
237 *val
= igb
->param_1000fdx_cap
;
240 case ETHER_STAT_CAP_1000HDX
:
241 *val
= igb
->param_1000hdx_cap
;
244 case ETHER_STAT_CAP_100FDX
:
245 *val
= igb
->param_100fdx_cap
;
248 case ETHER_STAT_CAP_100HDX
:
249 *val
= igb
->param_100hdx_cap
;
252 case ETHER_STAT_CAP_10FDX
:
253 *val
= igb
->param_10fdx_cap
;
256 case ETHER_STAT_CAP_10HDX
:
257 *val
= igb
->param_10hdx_cap
;
260 case ETHER_STAT_CAP_ASMPAUSE
:
261 *val
= igb
->param_asym_pause_cap
;
264 case ETHER_STAT_CAP_PAUSE
:
265 *val
= igb
->param_pause_cap
;
268 case ETHER_STAT_CAP_AUTONEG
:
269 *val
= igb
->param_autoneg_cap
;
272 case ETHER_STAT_ADV_CAP_1000FDX
:
273 *val
= igb
->param_adv_1000fdx_cap
;
276 case ETHER_STAT_ADV_CAP_1000HDX
:
277 *val
= igb
->param_adv_1000hdx_cap
;
280 case ETHER_STAT_ADV_CAP_100FDX
:
281 *val
= igb
->param_adv_100fdx_cap
;
284 case ETHER_STAT_ADV_CAP_100HDX
:
285 *val
= igb
->param_adv_100hdx_cap
;
288 case ETHER_STAT_ADV_CAP_10FDX
:
289 *val
= igb
->param_adv_10fdx_cap
;
292 case ETHER_STAT_ADV_CAP_10HDX
:
293 *val
= igb
->param_adv_10hdx_cap
;
296 case ETHER_STAT_ADV_CAP_ASMPAUSE
:
297 *val
= igb
->param_adv_asym_pause_cap
;
300 case ETHER_STAT_ADV_CAP_PAUSE
:
301 *val
= igb
->param_adv_pause_cap
;
304 case ETHER_STAT_ADV_CAP_AUTONEG
:
305 *val
= hw
->mac
.autoneg
;
308 case ETHER_STAT_LP_CAP_1000FDX
:
309 *val
= igb
->param_lp_1000fdx_cap
;
312 case ETHER_STAT_LP_CAP_1000HDX
:
313 *val
= igb
->param_lp_1000hdx_cap
;
316 case ETHER_STAT_LP_CAP_100FDX
:
317 *val
= igb
->param_lp_100fdx_cap
;
320 case ETHER_STAT_LP_CAP_100HDX
:
321 *val
= igb
->param_lp_100hdx_cap
;
324 case ETHER_STAT_LP_CAP_10FDX
:
325 *val
= igb
->param_lp_10fdx_cap
;
328 case ETHER_STAT_LP_CAP_10HDX
:
329 *val
= igb
->param_lp_10hdx_cap
;
332 case ETHER_STAT_LP_CAP_ASMPAUSE
:
333 *val
= igb
->param_lp_asym_pause_cap
;
336 case ETHER_STAT_LP_CAP_PAUSE
:
337 *val
= igb
->param_lp_pause_cap
;
340 case ETHER_STAT_LP_CAP_AUTONEG
:
341 *val
= igb
->param_lp_autoneg_cap
;
344 case ETHER_STAT_LINK_ASMPAUSE
:
345 *val
= igb
->param_asym_pause_cap
;
348 case ETHER_STAT_LINK_PAUSE
:
349 *val
= igb
->param_pause_cap
;
352 case ETHER_STAT_LINK_AUTONEG
:
353 *val
= hw
->mac
.autoneg
;
356 case ETHER_STAT_LINK_DUPLEX
:
357 *val
= (igb
->link_duplex
== FULL_DUPLEX
) ?
358 LINK_DUPLEX_FULL
: LINK_DUPLEX_HALF
;
361 case ETHER_STAT_TOOSHORT_ERRORS
:
362 igb
->stat_ruc
+= E1000_READ_REG(hw
, E1000_RUC
);
363 *val
= igb
->stat_ruc
;
366 case ETHER_STAT_CAP_REMFAULT
:
367 *val
= igb
->param_rem_fault
;
370 case ETHER_STAT_ADV_REMFAULT
:
371 *val
= igb
->param_adv_rem_fault
;
374 case ETHER_STAT_LP_REMFAULT
:
375 *val
= igb
->param_lp_rem_fault
;
378 case ETHER_STAT_JABBER_ERRORS
:
379 igb
->stat_rjc
+= E1000_READ_REG(hw
, E1000_RJC
);
380 *val
= igb
->stat_rjc
;
383 case ETHER_STAT_CAP_100T4
:
384 *val
= igb
->param_100t4_cap
;
387 case ETHER_STAT_ADV_CAP_100T4
:
388 *val
= igb
->param_adv_100t4_cap
;
391 case ETHER_STAT_LP_CAP_100T4
:
392 *val
= igb
->param_lp_100t4_cap
;
396 mutex_exit(&igb
->gen_lock
);
400 mutex_exit(&igb
->gen_lock
);
402 if (igb_check_acc_handle(igb
->osdep
.reg_handle
) != DDI_FM_OK
) {
403 ddi_fm_service_impact(igb
->dip
, DDI_SERVICE_DEGRADED
);
411 * Bring the device out of the reset/quiesced state that it
412 * was in when the interface was registered.
415 igb_m_start(void *arg
)
417 igb_t
*igb
= (igb_t
*)arg
;
419 mutex_enter(&igb
->gen_lock
);
421 if (igb
->igb_state
& IGB_SUSPENDED
) {
422 mutex_exit(&igb
->gen_lock
);
426 if (igb_start(igb
, B_TRUE
) != IGB_SUCCESS
) {
427 mutex_exit(&igb
->gen_lock
);
431 atomic_or_32(&igb
->igb_state
, IGB_STARTED
);
433 mutex_exit(&igb
->gen_lock
);
436 * Enable and start the watchdog timer
438 igb_enable_watchdog_timer(igb
);
444 * Stop the device and put it in a reset/quiesced state such
445 * that the interface can be unregistered.
448 igb_m_stop(void *arg
)
450 igb_t
*igb
= (igb_t
*)arg
;
452 mutex_enter(&igb
->gen_lock
);
454 if (igb
->igb_state
& IGB_SUSPENDED
) {
455 mutex_exit(&igb
->gen_lock
);
459 atomic_and_32(&igb
->igb_state
, ~IGB_STARTED
);
461 igb_stop(igb
, B_TRUE
);
463 mutex_exit(&igb
->gen_lock
);
466 * Disable and stop the watchdog timer
468 igb_disable_watchdog_timer(igb
);
472 * Set the promiscuity of the device.
475 igb_m_promisc(void *arg
, boolean_t on
)
477 igb_t
*igb
= (igb_t
*)arg
;
480 mutex_enter(&igb
->gen_lock
);
482 if (igb
->igb_state
& IGB_SUSPENDED
) {
483 mutex_exit(&igb
->gen_lock
);
487 reg_val
= E1000_READ_REG(&igb
->hw
, E1000_RCTL
);
490 reg_val
|= (E1000_RCTL_UPE
| E1000_RCTL_MPE
);
492 reg_val
&= (~(E1000_RCTL_UPE
| E1000_RCTL_MPE
));
494 E1000_WRITE_REG(&igb
->hw
, E1000_RCTL
, reg_val
);
496 mutex_exit(&igb
->gen_lock
);
498 if (igb_check_acc_handle(igb
->osdep
.reg_handle
) != DDI_FM_OK
) {
499 ddi_fm_service_impact(igb
->dip
, DDI_SERVICE_DEGRADED
);
507 * Add/remove the addresses to/from the set of multicast
508 * addresses for which the device will receive packets.
511 igb_m_multicst(void *arg
, boolean_t add
, const uint8_t *mcst_addr
)
513 igb_t
*igb
= (igb_t
*)arg
;
516 mutex_enter(&igb
->gen_lock
);
518 if (igb
->igb_state
& IGB_SUSPENDED
) {
519 mutex_exit(&igb
->gen_lock
);
523 result
= (add
) ? igb_multicst_add(igb
, mcst_addr
)
524 : igb_multicst_remove(igb
, mcst_addr
);
526 mutex_exit(&igb
->gen_lock
);
532 * Pass on M_IOCTL messages passed to the DLD, and support
533 * private IOCTLs for debugging and ndd.
536 igb_m_ioctl(void *arg
, queue_t
*q
, mblk_t
*mp
)
538 igb_t
*igb
= (igb_t
*)arg
;
540 enum ioc_reply status
;
542 iocp
= (struct iocblk
*)(uintptr_t)mp
->b_rptr
;
545 mutex_enter(&igb
->gen_lock
);
546 if (igb
->igb_state
& IGB_SUSPENDED
) {
547 mutex_exit(&igb
->gen_lock
);
548 miocnak(q
, mp
, 0, EINVAL
);
551 mutex_exit(&igb
->gen_lock
);
553 switch (iocp
->ioc_cmd
) {
554 case LB_GET_INFO_SIZE
:
558 status
= igb_loopback_ioctl(igb
, iocp
, mp
);
567 * Decide how to reply
573 * Error, reply with a NAK and EINVAL or the specified error
575 miocnak(q
, mp
, 0, iocp
->ioc_error
== 0 ?
576 EINVAL
: iocp
->ioc_error
);
581 * OK, reply already sent
587 * OK, reply with an ACK
589 miocack(q
, mp
, 0, 0);
594 * OK, send prepared reply as ACK or NAK
596 mp
->b_datap
->db_type
= iocp
->ioc_error
== 0 ?
604 * Add a MAC address to the target RX group.
607 igb_addmac(void *arg
, const uint8_t *mac_addr
)
609 igb_rx_group_t
*rx_group
= (igb_rx_group_t
*)arg
;
610 igb_t
*igb
= rx_group
->igb
;
611 struct e1000_hw
*hw
= &igb
->hw
;
614 mutex_enter(&igb
->gen_lock
);
616 if (igb
->igb_state
& IGB_SUSPENDED
) {
617 mutex_exit(&igb
->gen_lock
);
621 if (igb
->unicst_avail
== 0) {
622 /* no slots available */
623 mutex_exit(&igb
->gen_lock
);
628 * The slots from 0 to igb->num_rx_groups are reserved slots which
629 * are 1 to 1 mapped with group index directly. The other slots are
630 * shared between the all of groups. While adding a MAC address,
631 * it will try to set the reserved slots first, then the shared slots.
634 if (igb
->unicst_addr
[rx_group
->index
].mac
.set
== 1) {
636 * The reserved slot for current group is used, find the free
637 * slots in the shared slots.
639 for (i
= igb
->num_rx_groups
; i
< igb
->unicst_total
; i
++) {
640 if (igb
->unicst_addr
[i
].mac
.set
== 0) {
646 slot
= rx_group
->index
;
649 /* no slots available in the shared slots */
650 mutex_exit(&igb
->gen_lock
);
654 /* Set VMDq according to the mode supported by hardware. */
655 e1000_rar_set_vmdq(hw
, mac_addr
, slot
, igb
->vmdq_mode
, rx_group
->index
);
657 bcopy(mac_addr
, igb
->unicst_addr
[slot
].mac
.addr
, ETHERADDRL
);
658 igb
->unicst_addr
[slot
].mac
.group_index
= rx_group
->index
;
659 igb
->unicst_addr
[slot
].mac
.set
= 1;
662 mutex_exit(&igb
->gen_lock
);
668 * Remove a MAC address from the specified RX group.
671 igb_remmac(void *arg
, const uint8_t *mac_addr
)
673 igb_rx_group_t
*rx_group
= (igb_rx_group_t
*)arg
;
674 igb_t
*igb
= rx_group
->igb
;
675 struct e1000_hw
*hw
= &igb
->hw
;
678 mutex_enter(&igb
->gen_lock
);
680 if (igb
->igb_state
& IGB_SUSPENDED
) {
681 mutex_exit(&igb
->gen_lock
);
685 slot
= igb_unicst_find(igb
, mac_addr
);
687 mutex_exit(&igb
->gen_lock
);
691 if (igb
->unicst_addr
[slot
].mac
.set
== 0) {
692 mutex_exit(&igb
->gen_lock
);
696 /* Clear the MAC ddress in the slot */
697 e1000_rar_clear(hw
, slot
);
698 igb
->unicst_addr
[slot
].mac
.set
= 0;
701 mutex_exit(&igb
->gen_lock
);
707 * Enable interrupt on the specificed rx ring.
710 igb_rx_ring_intr_enable(mac_intr_handle_t intrh
)
712 igb_rx_ring_t
*rx_ring
= (igb_rx_ring_t
*)intrh
;
713 igb_t
*igb
= rx_ring
->igb
;
714 struct e1000_hw
*hw
= &igb
->hw
;
715 uint32_t index
= rx_ring
->index
;
717 if (igb
->intr_type
== DDI_INTR_TYPE_MSIX
) {
718 /* Interrupt enabling for MSI-X */
719 igb
->eims_mask
|= (E1000_EICR_RX_QUEUE0
<< index
);
720 E1000_WRITE_REG(hw
, E1000_EIMS
, igb
->eims_mask
);
721 E1000_WRITE_REG(hw
, E1000_EIAC
, igb
->eims_mask
);
724 /* Interrupt enabling for MSI and legacy */
725 igb
->ims_mask
|= E1000_IMS_RXT0
;
726 E1000_WRITE_REG(hw
, E1000_IMS
, igb
->ims_mask
);
729 E1000_WRITE_FLUSH(hw
);
735 * Disable interrupt on the specificed rx ring.
738 igb_rx_ring_intr_disable(mac_intr_handle_t intrh
)
740 igb_rx_ring_t
*rx_ring
= (igb_rx_ring_t
*)intrh
;
741 igb_t
*igb
= rx_ring
->igb
;
742 struct e1000_hw
*hw
= &igb
->hw
;
743 uint32_t index
= rx_ring
->index
;
745 if (igb
->intr_type
== DDI_INTR_TYPE_MSIX
) {
746 /* Interrupt disabling for MSI-X */
747 igb
->eims_mask
&= ~(E1000_EICR_RX_QUEUE0
<< index
);
748 E1000_WRITE_REG(hw
, E1000_EIMC
,
749 (E1000_EICR_RX_QUEUE0
<< index
));
750 E1000_WRITE_REG(hw
, E1000_EIAC
, igb
->eims_mask
);
753 /* Interrupt disabling for MSI and legacy */
754 igb
->ims_mask
&= ~E1000_IMS_RXT0
;
755 E1000_WRITE_REG(hw
, E1000_IMC
, E1000_IMS_RXT0
);
758 E1000_WRITE_FLUSH(hw
);
764 * Get the global ring index by a ring index within a group.
767 igb_get_rx_ring_index(igb_t
*igb
, int gindex
, int rindex
)
769 igb_rx_ring_t
*rx_ring
;
772 for (i
= 0; i
< igb
->num_rx_rings
; i
++) {
773 rx_ring
= &igb
->rx_rings
[i
];
774 if (rx_ring
->group_index
== gindex
)
784 igb_ring_start(mac_ring_driver_t rh
, uint64_t mr_gen_num
)
786 igb_rx_ring_t
*rx_ring
= (igb_rx_ring_t
*)rh
;
788 mutex_enter(&rx_ring
->rx_lock
);
789 rx_ring
->ring_gen_num
= mr_gen_num
;
790 mutex_exit(&rx_ring
->rx_lock
);
795 * Callback function for MAC layer to register all rings.
799 igb_fill_ring(void *arg
, mac_ring_type_t rtype
, const int rg_index
,
800 const int index
, mac_ring_info_t
*infop
, mac_ring_handle_t rh
)
802 igb_t
*igb
= (igb_t
*)arg
;
803 mac_intr_t
*mintr
= &infop
->mri_intr
;
806 case MAC_RING_TYPE_RX
: {
807 igb_rx_ring_t
*rx_ring
;
811 * 'index' is the ring index within the group.
812 * We need the global ring index by searching in group.
814 global_index
= igb_get_rx_ring_index(igb
, rg_index
, index
);
816 ASSERT(global_index
>= 0);
818 rx_ring
= &igb
->rx_rings
[global_index
];
819 rx_ring
->ring_handle
= rh
;
821 infop
->mri_driver
= (mac_ring_driver_t
)rx_ring
;
822 infop
->mri_start
= igb_ring_start
;
823 infop
->mri_stop
= NULL
;
824 infop
->mri_poll
= (mac_ring_poll_t
)igb_rx_ring_poll
;
825 infop
->mri_stat
= igb_rx_ring_stat
;
827 mintr
->mi_handle
= (mac_intr_handle_t
)rx_ring
;
828 mintr
->mi_enable
= igb_rx_ring_intr_enable
;
829 mintr
->mi_disable
= igb_rx_ring_intr_disable
;
830 if (igb
->intr_type
& (DDI_INTR_TYPE_MSIX
| DDI_INTR_TYPE_MSI
)) {
831 mintr
->mi_ddi_handle
=
832 igb
->htable
[rx_ring
->intr_vector
];
836 case MAC_RING_TYPE_TX
: {
837 ASSERT(index
< igb
->num_tx_rings
);
839 igb_tx_ring_t
*tx_ring
= &igb
->tx_rings
[index
];
840 tx_ring
->ring_handle
= rh
;
842 infop
->mri_driver
= (mac_ring_driver_t
)tx_ring
;
843 infop
->mri_start
= NULL
;
844 infop
->mri_stop
= NULL
;
845 infop
->mri_tx
= igb_tx_ring_send
;
846 infop
->mri_stat
= igb_tx_ring_stat
;
847 if (igb
->intr_type
& (DDI_INTR_TYPE_MSIX
| DDI_INTR_TYPE_MSI
)) {
848 mintr
->mi_ddi_handle
=
849 igb
->htable
[tx_ring
->intr_vector
];
859 igb_fill_group(void *arg
, mac_ring_type_t rtype
, const int index
,
860 mac_group_info_t
*infop
, mac_group_handle_t gh
)
862 igb_t
*igb
= (igb_t
*)arg
;
865 case MAC_RING_TYPE_RX
: {
866 igb_rx_group_t
*rx_group
;
868 ASSERT((index
>= 0) && (index
< igb
->num_rx_groups
));
870 rx_group
= &igb
->rx_groups
[index
];
871 rx_group
->group_handle
= gh
;
873 infop
->mgi_driver
= (mac_group_driver_t
)rx_group
;
874 infop
->mgi_start
= NULL
;
875 infop
->mgi_stop
= NULL
;
876 infop
->mgi_addmac
= igb_addmac
;
877 infop
->mgi_remmac
= igb_remmac
;
878 infop
->mgi_count
= (igb
->num_rx_rings
/ igb
->num_rx_groups
);
882 case MAC_RING_TYPE_TX
:
890 * Obtain the MAC's capabilities and associated data from
894 igb_m_getcapab(void *arg
, mac_capab_t cap
, void *cap_data
)
896 igb_t
*igb
= (igb_t
*)arg
;
899 case MAC_CAPAB_HCKSUM
: {
900 uint32_t *tx_hcksum_flags
= cap_data
;
903 * We advertise our capabilities only if tx hcksum offload is
904 * enabled. On receive, the stack will accept checksummed
905 * packets anyway, even if we haven't said we can deliver
908 if (!igb
->tx_hcksum_enable
)
911 *tx_hcksum_flags
= HCKSUM_INET_PARTIAL
| HCKSUM_IPHDRCKSUM
;
914 case MAC_CAPAB_LSO
: {
915 mac_capab_lso_t
*cap_lso
= cap_data
;
917 if (igb
->lso_enable
) {
918 cap_lso
->lso_flags
= LSO_TX_BASIC_TCP_IPV4
;
919 cap_lso
->lso_basic_tcp_ipv4
.lso_max
= IGB_LSO_MAXLEN
;
925 case MAC_CAPAB_RINGS
: {
926 mac_capab_rings_t
*cap_rings
= cap_data
;
928 switch (cap_rings
->mr_type
) {
929 case MAC_RING_TYPE_RX
:
930 cap_rings
->mr_group_type
= MAC_GROUP_TYPE_STATIC
;
931 cap_rings
->mr_rnum
= igb
->num_rx_rings
;
932 cap_rings
->mr_gnum
= igb
->num_rx_groups
;
933 cap_rings
->mr_rget
= igb_fill_ring
;
934 cap_rings
->mr_gget
= igb_fill_group
;
935 cap_rings
->mr_gaddring
= NULL
;
936 cap_rings
->mr_gremring
= NULL
;
939 case MAC_RING_TYPE_TX
:
940 cap_rings
->mr_group_type
= MAC_GROUP_TYPE_STATIC
;
941 cap_rings
->mr_rnum
= igb
->num_tx_rings
;
942 cap_rings
->mr_gnum
= 0;
943 cap_rings
->mr_rget
= igb_fill_ring
;
944 cap_rings
->mr_gget
= NULL
;
960 igb_m_setprop(void *arg
, const char *pr_name
, mac_prop_id_t pr_num
,
961 uint_t pr_valsize
, const void *pr_val
)
963 igb_t
*igb
= (igb_t
*)arg
;
964 struct e1000_hw
*hw
= &igb
->hw
;
966 uint32_t flow_control
;
967 uint32_t cur_mtu
, new_mtu
;
971 mutex_enter(&igb
->gen_lock
);
972 if (igb
->igb_state
& IGB_SUSPENDED
) {
973 mutex_exit(&igb
->gen_lock
);
977 if (igb
->loopback_mode
!= IGB_LB_NONE
&& igb_param_locked(pr_num
)) {
979 * All en_* parameters are locked (read-only)
980 * while the device is in any sort of loopback mode.
982 mutex_exit(&igb
->gen_lock
);
987 case MAC_PROP_EN_1000FDX_CAP
:
988 /* read/write on copper, read-only on serdes */
989 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
993 igb
->param_en_1000fdx_cap
= *(uint8_t *)pr_val
;
994 igb
->param_adv_1000fdx_cap
= *(uint8_t *)pr_val
;
996 case MAC_PROP_EN_100FDX_CAP
:
997 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
1001 igb
->param_en_100fdx_cap
= *(uint8_t *)pr_val
;
1002 igb
->param_adv_100fdx_cap
= *(uint8_t *)pr_val
;
1004 case MAC_PROP_EN_100HDX_CAP
:
1005 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
1009 igb
->param_en_100hdx_cap
= *(uint8_t *)pr_val
;
1010 igb
->param_adv_100hdx_cap
= *(uint8_t *)pr_val
;
1012 case MAC_PROP_EN_10FDX_CAP
:
1013 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
1017 igb
->param_en_10fdx_cap
= *(uint8_t *)pr_val
;
1018 igb
->param_adv_10fdx_cap
= *(uint8_t *)pr_val
;
1020 case MAC_PROP_EN_10HDX_CAP
:
1021 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
1025 igb
->param_en_10hdx_cap
= *(uint8_t *)pr_val
;
1026 igb
->param_adv_10hdx_cap
= *(uint8_t *)pr_val
;
1028 case MAC_PROP_AUTONEG
:
1029 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
1033 igb
->param_adv_autoneg_cap
= *(uint8_t *)pr_val
;
1035 case MAC_PROP_FLOWCTRL
:
1036 bcopy(pr_val
, &flow_control
, sizeof (flow_control
));
1038 switch (flow_control
) {
1042 case LINK_FLOWCTRL_NONE
:
1043 hw
->fc
.requested_mode
= e1000_fc_none
;
1045 case LINK_FLOWCTRL_RX
:
1046 hw
->fc
.requested_mode
= e1000_fc_rx_pause
;
1048 case LINK_FLOWCTRL_TX
:
1049 hw
->fc
.requested_mode
= e1000_fc_tx_pause
;
1051 case LINK_FLOWCTRL_BI
:
1052 hw
->fc
.requested_mode
= e1000_fc_full
;
1057 if (igb_setup_link(igb
, B_TRUE
) != IGB_SUCCESS
)
1061 case MAC_PROP_ADV_1000FDX_CAP
:
1062 case MAC_PROP_ADV_1000HDX_CAP
:
1063 case MAC_PROP_ADV_100T4_CAP
:
1064 case MAC_PROP_ADV_100FDX_CAP
:
1065 case MAC_PROP_ADV_100HDX_CAP
:
1066 case MAC_PROP_ADV_10FDX_CAP
:
1067 case MAC_PROP_ADV_10HDX_CAP
:
1068 case MAC_PROP_EN_1000HDX_CAP
:
1069 case MAC_PROP_EN_100T4_CAP
:
1070 case MAC_PROP_STATUS
:
1071 case MAC_PROP_SPEED
:
1072 case MAC_PROP_DUPLEX
:
1073 err
= ENOTSUP
; /* read-only prop. Can't set this. */
1076 /* adapter must be stopped for an MTU change */
1077 if (igb
->igb_state
& IGB_STARTED
) {
1082 cur_mtu
= igb
->default_mtu
;
1083 bcopy(pr_val
, &new_mtu
, sizeof (new_mtu
));
1084 if (new_mtu
== cur_mtu
) {
1089 if (new_mtu
< MIN_MTU
|| new_mtu
> MAX_MTU
) {
1094 err
= mac_maxsdu_update(igb
->mac_hdl
, new_mtu
);
1096 igb
->default_mtu
= new_mtu
;
1097 igb
->max_frame_size
= igb
->default_mtu
+
1098 sizeof (struct ether_vlan_header
) + ETHERFCSL
;
1101 * Set rx buffer size
1103 rx_size
= igb
->max_frame_size
+ IPHDR_ALIGN_ROOM
;
1104 igb
->rx_buf_size
= ((rx_size
>> 10) + ((rx_size
&
1105 (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
1108 * Set tx buffer size
1110 tx_size
= igb
->max_frame_size
;
1111 igb
->tx_buf_size
= ((tx_size
>> 10) + ((tx_size
&
1112 (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
1115 case MAC_PROP_PRIVATE
:
1116 err
= igb_set_priv_prop(igb
, pr_name
, pr_valsize
, pr_val
);
1123 mutex_exit(&igb
->gen_lock
);
1125 if (igb_check_acc_handle(igb
->osdep
.reg_handle
) != DDI_FM_OK
) {
1126 ddi_fm_service_impact(igb
->dip
, DDI_SERVICE_DEGRADED
);
1134 igb_m_getprop(void *arg
, const char *pr_name
, mac_prop_id_t pr_num
,
1135 uint_t pr_valsize
, void *pr_val
)
1137 igb_t
*igb
= (igb_t
*)arg
;
1138 struct e1000_hw
*hw
= &igb
->hw
;
1140 uint32_t flow_control
;
1144 case MAC_PROP_DUPLEX
:
1145 ASSERT(pr_valsize
>= sizeof (link_duplex_t
));
1146 bcopy(&igb
->link_duplex
, pr_val
, sizeof (link_duplex_t
));
1148 case MAC_PROP_SPEED
:
1149 ASSERT(pr_valsize
>= sizeof (uint64_t));
1150 tmp
= igb
->link_speed
* 1000000ull;
1151 bcopy(&tmp
, pr_val
, sizeof (tmp
));
1153 case MAC_PROP_AUTONEG
:
1154 ASSERT(pr_valsize
>= sizeof (uint8_t));
1155 *(uint8_t *)pr_val
= igb
->param_adv_autoneg_cap
;
1157 case MAC_PROP_FLOWCTRL
:
1158 ASSERT(pr_valsize
>= sizeof (uint32_t));
1159 switch (hw
->fc
.requested_mode
) {
1161 flow_control
= LINK_FLOWCTRL_NONE
;
1163 case e1000_fc_rx_pause
:
1164 flow_control
= LINK_FLOWCTRL_RX
;
1166 case e1000_fc_tx_pause
:
1167 flow_control
= LINK_FLOWCTRL_TX
;
1170 flow_control
= LINK_FLOWCTRL_BI
;
1173 bcopy(&flow_control
, pr_val
, sizeof (flow_control
));
1175 case MAC_PROP_ADV_1000FDX_CAP
:
1176 *(uint8_t *)pr_val
= igb
->param_adv_1000fdx_cap
;
1178 case MAC_PROP_EN_1000FDX_CAP
:
1179 *(uint8_t *)pr_val
= igb
->param_en_1000fdx_cap
;
1181 case MAC_PROP_ADV_1000HDX_CAP
:
1182 *(uint8_t *)pr_val
= igb
->param_adv_1000hdx_cap
;
1184 case MAC_PROP_EN_1000HDX_CAP
:
1185 *(uint8_t *)pr_val
= igb
->param_en_1000hdx_cap
;
1187 case MAC_PROP_ADV_100T4_CAP
:
1188 *(uint8_t *)pr_val
= igb
->param_adv_100t4_cap
;
1190 case MAC_PROP_EN_100T4_CAP
:
1191 *(uint8_t *)pr_val
= igb
->param_en_100t4_cap
;
1193 case MAC_PROP_ADV_100FDX_CAP
:
1194 *(uint8_t *)pr_val
= igb
->param_adv_100fdx_cap
;
1196 case MAC_PROP_EN_100FDX_CAP
:
1197 *(uint8_t *)pr_val
= igb
->param_en_100fdx_cap
;
1199 case MAC_PROP_ADV_100HDX_CAP
:
1200 *(uint8_t *)pr_val
= igb
->param_adv_100hdx_cap
;
1202 case MAC_PROP_EN_100HDX_CAP
:
1203 *(uint8_t *)pr_val
= igb
->param_en_100hdx_cap
;
1205 case MAC_PROP_ADV_10FDX_CAP
:
1206 *(uint8_t *)pr_val
= igb
->param_adv_10fdx_cap
;
1208 case MAC_PROP_EN_10FDX_CAP
:
1209 *(uint8_t *)pr_val
= igb
->param_en_10fdx_cap
;
1211 case MAC_PROP_ADV_10HDX_CAP
:
1212 *(uint8_t *)pr_val
= igb
->param_adv_10hdx_cap
;
1214 case MAC_PROP_EN_10HDX_CAP
:
1215 *(uint8_t *)pr_val
= igb
->param_en_10hdx_cap
;
1217 case MAC_PROP_PRIVATE
:
1218 err
= igb_get_priv_prop(igb
, pr_name
, pr_valsize
, pr_val
);
1228 igb_m_propinfo(void *arg
, const char *pr_name
, mac_prop_id_t pr_num
,
1229 mac_prop_info_handle_t prh
)
1231 igb_t
*igb
= (igb_t
*)arg
;
1232 struct e1000_hw
*hw
= &igb
->hw
;
1233 uint16_t phy_status
, phy_ext_status
;
1236 case MAC_PROP_DUPLEX
:
1237 case MAC_PROP_SPEED
:
1238 case MAC_PROP_ADV_1000FDX_CAP
:
1239 case MAC_PROP_ADV_1000HDX_CAP
:
1240 case MAC_PROP_EN_1000HDX_CAP
:
1241 case MAC_PROP_ADV_100T4_CAP
:
1242 case MAC_PROP_EN_100T4_CAP
:
1243 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
1246 case MAC_PROP_EN_1000FDX_CAP
:
1247 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
1248 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
1250 (void) e1000_read_phy_reg(hw
, PHY_EXT_STATUS
,
1252 mac_prop_info_set_default_uint8(prh
,
1253 ((phy_ext_status
& IEEE_ESR_1000T_FD_CAPS
) ||
1254 (phy_ext_status
& IEEE_ESR_1000X_FD_CAPS
)) ? 1 : 0);
1258 case MAC_PROP_ADV_100FDX_CAP
:
1259 case MAC_PROP_EN_100FDX_CAP
:
1260 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
1261 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
1263 (void) e1000_read_phy_reg(hw
, PHY_STATUS
, &phy_status
);
1264 mac_prop_info_set_default_uint8(prh
,
1265 ((phy_status
& MII_SR_100X_FD_CAPS
) ||
1266 (phy_status
& MII_SR_100T2_FD_CAPS
)) ? 1 : 0);
1270 case MAC_PROP_ADV_100HDX_CAP
:
1271 case MAC_PROP_EN_100HDX_CAP
:
1272 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
1273 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
1275 (void) e1000_read_phy_reg(hw
, PHY_STATUS
, &phy_status
);
1276 mac_prop_info_set_default_uint8(prh
,
1277 ((phy_status
& MII_SR_100X_HD_CAPS
) ||
1278 (phy_status
& MII_SR_100T2_HD_CAPS
)) ? 1 : 0);
1282 case MAC_PROP_ADV_10FDX_CAP
:
1283 case MAC_PROP_EN_10FDX_CAP
:
1284 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
1285 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
1287 (void) e1000_read_phy_reg(hw
, PHY_STATUS
, &phy_status
);
1288 mac_prop_info_set_default_uint8(prh
,
1289 (phy_status
& MII_SR_10T_FD_CAPS
) ? 1 : 0);
1293 case MAC_PROP_ADV_10HDX_CAP
:
1294 case MAC_PROP_EN_10HDX_CAP
:
1295 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
1296 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
1298 (void) e1000_read_phy_reg(hw
, PHY_STATUS
, &phy_status
);
1299 mac_prop_info_set_default_uint8(prh
,
1300 (phy_status
& MII_SR_10T_HD_CAPS
) ? 1 : 0);
1304 case MAC_PROP_AUTONEG
:
1305 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
1306 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
1308 (void) e1000_read_phy_reg(hw
, PHY_STATUS
, &phy_status
);
1309 mac_prop_info_set_default_uint8(prh
,
1310 (phy_status
& MII_SR_AUTONEG_CAPS
) ? 1 : 0);
1314 case MAC_PROP_FLOWCTRL
:
1315 mac_prop_info_set_default_link_flowctrl(prh
, LINK_FLOWCTRL_BI
);
1319 mac_prop_info_set_range_uint32(prh
, MIN_MTU
, MAX_MTU
);
1322 case MAC_PROP_PRIVATE
:
1323 igb_priv_prop_info(igb
, pr_name
, prh
);
1330 igb_param_locked(mac_prop_id_t pr_num
)
1333 * All en_* parameters are locked (read-only) while
1334 * the device is in any sort of loopback mode ...
1337 case MAC_PROP_EN_1000FDX_CAP
:
1338 case MAC_PROP_EN_1000HDX_CAP
:
1339 case MAC_PROP_EN_100T4_CAP
:
1340 case MAC_PROP_EN_100FDX_CAP
:
1341 case MAC_PROP_EN_100HDX_CAP
:
1342 case MAC_PROP_EN_10FDX_CAP
:
1343 case MAC_PROP_EN_10HDX_CAP
:
1344 case MAC_PROP_AUTONEG
:
1345 case MAC_PROP_FLOWCTRL
:
1353 igb_set_priv_prop(igb_t
*igb
, const char *pr_name
,
1354 uint_t pr_valsize
, const void *pr_val
)
1358 struct e1000_hw
*hw
= &igb
->hw
;
1361 if (strcmp(pr_name
, "_eee_support") == 0) {
1364 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
1369 * For now, only supported on I350/I354.
1370 * Add new mac.type values (or use < instead)
1371 * as new cards offer up EEE.
1373 switch (hw
->mac
.type
) {
1375 /* Must set this prior to the set call. */
1376 hw
->dev_spec
._82575
.eee_disable
= !result
;
1377 if (e1000_set_eee_i350(hw
) != E1000_SUCCESS
)
1381 /* Must set this prior to the set call. */
1382 hw
->dev_spec
._82575
.eee_disable
= !result
;
1383 if (e1000_set_eee_i354(hw
) != E1000_SUCCESS
)
1396 if (strcmp(pr_name
, "_tx_copy_thresh") == 0) {
1397 if (pr_val
== NULL
) {
1401 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
1402 if (result
< MIN_TX_COPY_THRESHOLD
||
1403 result
> MAX_TX_COPY_THRESHOLD
)
1406 igb
->tx_copy_thresh
= (uint32_t)result
;
1410 if (strcmp(pr_name
, "_tx_recycle_thresh") == 0) {
1411 if (pr_val
== NULL
) {
1415 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
1416 if (result
< MIN_TX_RECYCLE_THRESHOLD
||
1417 result
> MAX_TX_RECYCLE_THRESHOLD
)
1420 igb
->tx_recycle_thresh
= (uint32_t)result
;
1424 if (strcmp(pr_name
, "_tx_overload_thresh") == 0) {
1425 if (pr_val
== NULL
) {
1429 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
1430 if (result
< MIN_TX_OVERLOAD_THRESHOLD
||
1431 result
> MAX_TX_OVERLOAD_THRESHOLD
)
1434 igb
->tx_overload_thresh
= (uint32_t)result
;
1438 if (strcmp(pr_name
, "_tx_resched_thresh") == 0) {
1439 if (pr_val
== NULL
) {
1443 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
1444 if (result
< MIN_TX_RESCHED_THRESHOLD
||
1445 result
> MAX_TX_RESCHED_THRESHOLD
||
1446 result
> igb
->tx_ring_size
)
1449 igb
->tx_resched_thresh
= (uint32_t)result
;
1453 if (strcmp(pr_name
, "_rx_copy_thresh") == 0) {
1454 if (pr_val
== NULL
) {
1458 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
1459 if (result
< MIN_RX_COPY_THRESHOLD
||
1460 result
> MAX_RX_COPY_THRESHOLD
)
1463 igb
->rx_copy_thresh
= (uint32_t)result
;
1467 if (strcmp(pr_name
, "_rx_limit_per_intr") == 0) {
1468 if (pr_val
== NULL
) {
1472 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
1473 if (result
< MIN_RX_LIMIT_PER_INTR
||
1474 result
> MAX_RX_LIMIT_PER_INTR
)
1477 igb
->rx_limit_per_intr
= (uint32_t)result
;
1481 if (strcmp(pr_name
, "_intr_throttling") == 0) {
1482 if (pr_val
== NULL
) {
1486 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
1488 if (result
< igb
->capab
->min_intr_throttle
||
1489 result
> igb
->capab
->max_intr_throttle
)
1492 igb
->intr_throttling
[0] = (uint32_t)result
;
1494 for (i
= 0; i
< MAX_NUM_EITR
; i
++)
1495 igb
->intr_throttling
[i
] =
1496 igb
->intr_throttling
[0];
1498 /* Set interrupt throttling rate */
1499 for (i
= 0; i
< igb
->intr_cnt
; i
++)
1500 E1000_WRITE_REG(hw
, E1000_EITR(i
),
1501 igb
->intr_throttling
[i
]);
1509 igb_get_priv_prop(igb_t
*igb
, const char *pr_name
, uint_t pr_valsize
,
1514 if (strcmp(pr_name
, "_adv_pause_cap") == 0) {
1515 value
= igb
->param_adv_pause_cap
;
1516 } else if (strcmp(pr_name
, "_adv_asym_pause_cap") == 0) {
1517 value
= igb
->param_adv_asym_pause_cap
;
1518 } else if (strcmp(pr_name
, "_eee_support") == 0) {
1520 * For now, only supported on I350. Add new mac.type values
1521 * (or use < instead) as new cards offer up EEE.
1523 switch (igb
->hw
.mac
.type
) {
1526 value
= !(igb
->hw
.dev_spec
._82575
.eee_disable
);
1531 } else if (strcmp(pr_name
, "_tx_copy_thresh") == 0) {
1532 value
= igb
->tx_copy_thresh
;
1533 } else if (strcmp(pr_name
, "_tx_recycle_thresh") == 0) {
1534 value
= igb
->tx_recycle_thresh
;
1535 } else if (strcmp(pr_name
, "_tx_overload_thresh") == 0) {
1536 value
= igb
->tx_overload_thresh
;
1537 } else if (strcmp(pr_name
, "_tx_resched_thresh") == 0) {
1538 value
= igb
->tx_resched_thresh
;
1539 } else if (strcmp(pr_name
, "_rx_copy_thresh") == 0) {
1540 value
= igb
->rx_copy_thresh
;
1541 } else if (strcmp(pr_name
, "_rx_limit_per_intr") == 0) {
1542 value
= igb
->rx_limit_per_intr
;
1543 } else if (strcmp(pr_name
, "_intr_throttling") == 0) {
1544 value
= igb
->intr_throttling
[0];
1549 (void) snprintf(pr_val
, pr_valsize
, "%d", value
);
1554 igb_priv_prop_info(igb_t
*igb
, const char *pr_name
, mac_prop_info_handle_t prh
)
1559 if (strcmp(pr_name
, "_adv_pause_cap") == 0 ||
1560 strcmp(pr_name
, "_adv_asym_pause_cap") == 0) {
1561 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
1563 } else if (strcmp(pr_name
, "_tx_copy_thresh") == 0) {
1564 value
= DEFAULT_TX_COPY_THRESHOLD
;
1565 } else if (strcmp(pr_name
, "_tx_recycle_thresh") == 0) {
1566 value
= DEFAULT_TX_RECYCLE_THRESHOLD
;
1567 } else if (strcmp(pr_name
, "_tx_overload_thresh") == 0) {
1568 value
= DEFAULT_TX_OVERLOAD_THRESHOLD
;
1569 } else if (strcmp(pr_name
, "_tx_resched_thresh") == 0) {
1570 value
= DEFAULT_TX_RESCHED_THRESHOLD
;
1571 } else if (strcmp(pr_name
, "_rx_copy_thresh") == 0) {
1572 value
= DEFAULT_RX_COPY_THRESHOLD
;
1573 } else if (strcmp(pr_name
, "_rx_limit_per_intr") == 0) {
1574 value
= DEFAULT_RX_LIMIT_PER_INTR
;
1575 } else if (strcmp(pr_name
, "_intr_throttling") == 0) {
1576 value
= igb
->capab
->def_intr_throttle
;
1581 (void) snprintf(valstr
, sizeof (valstr
), "%d", value
);
1582 mac_prop_info_set_default_str(prh
, valstr
);