4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, v.1, (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2014-2017 Cavium, Inc.
24 * The contents of this file are subject to the terms of the Common Development
25 * and Distribution License, v.1, (the "License").
27 * You may not use this file except in compliance with the License.
29 * You can obtain a copy of the License at available
30 * at http://opensource.org/licenses/CDDL-1.0
32 * See the License for the specific language governing permissions and
33 * limitations under the License.
/*
 * Acquire/release the per-fastpath lock.
 *
 * NOTE(review): the argument is parenthesized so that any expression
 * passed as "ptr" binds correctly with the "->" operator.  The trailing
 * semicolon inside the expansion is preserved from the original so that
 * existing call sites (which may omit their own ";") keep compiling, but
 * it means these macros are NOT safe as the sole statement of an
 * unbraced if/else body — always brace the callers.
 */
#define	FP_LOCK(ptr) \
	mutex_enter(&(ptr)->fp_lock);
#define	FP_UNLOCK(ptr) \
	mutex_exit(&(ptr)->fp_lock);
45 qede_ucst_find(qede_t
*qede
, const uint8_t *mac_addr
)
49 for(slot
= 0; slot
< qede
->ucst_total
; slot
++) {
50 if (bcmp(qede
->ucst_mac
[slot
].mac_addr
.ether_addr_octet
,
51 mac_addr
, ETHERADDRL
) == 0) {
60 qede_set_mac_addr(qede_t
*qede
, uint8_t *mac_addr
, uint8_t fl
)
62 struct ecore_filter_ucast params
;
64 memset(¶ms
, 0, sizeof (params
));
67 params
.type
= ECORE_FILTER_MAC
;
68 params
.is_rx_filter
= true;
69 params
.is_tx_filter
= true;
70 COPY_ETH_ADDRESS(mac_addr
, params
.mac
);
72 return (ecore_filter_ucast_cmd(&qede
->edev
,
73 ¶ms
, ECORE_SPQ_MODE_EBLOCK
, NULL
));
78 qede_add_macaddr(qede_t
*qede
, uint8_t *mac_addr
)
82 i
= qede_ucst_find(qede
, mac_addr
);
84 /* LINTED E_ARGUMENT_MISMATCH */
85 qede_info(qede
, "mac addr already added %d\n",
89 if (qede
->ucst_avail
== 0) {
90 qede_info(qede
, "add macaddr ignored \n");
93 for (i
= 0; i
< qede
->ucst_total
; i
++) {
94 if (qede
->ucst_mac
[i
].set
== 0) {
98 if (i
>= qede
->ucst_total
) {
99 qede_info(qede
, "add macaddr ignored no space");
102 ret
= qede_set_mac_addr(qede
, (uint8_t *)mac_addr
, ECORE_FILTER_ADD
);
105 qede
->ucst_mac
[i
].mac_addr
.ether_addr_octet
,
107 qede
->ucst_mac
[i
].set
= 1;
109 /* LINTED E_ARGUMENT_MISMATCH */
110 qede_info(qede
, " add macaddr passed for addr "
111 "%02x:%02x:%02x:%02x:%02x:%02x",
112 mac_addr
[0], mac_addr
[1],
113 mac_addr
[2], mac_addr
[3], mac_addr
[4], mac_addr
[5]);
115 /* LINTED E_ARGUMENT_MISMATCH */
116 qede_info(qede
, "add macaddr failed for addr "
117 "%02x:%02x:%02x:%02x:%02x:%02x",
118 mac_addr
[0], mac_addr
[1],
119 mac_addr
[2], mac_addr
[3], mac_addr
[4], mac_addr
[5]);
122 if (qede
->ucst_avail
== (qede
->ucst_total
-1)) {
125 0xff, 0xff, 0xff, 0xff, 0xff,
128 for (i
= 0; i
< qede
->ucst_total
; i
++) {
129 if (qede
->ucst_mac
[i
].set
== 0)
132 ret
= qede_set_mac_addr(qede
,
133 (uint8_t *)bcast_addr
, ECORE_FILTER_ADD
);
136 qede
->ucst_mac
[i
].mac_addr
.ether_addr_octet
,
138 qede
->ucst_mac
[i
].set
= 1;
142 /* LINTED E_ARGUMENT_MISMATCH */
143 qede_info(qede
, "add macaddr failed for addr "
144 "%02x:%02x:%02x:%02x:%02x:%02x",
145 mac_addr
[0], mac_addr
[1],
146 mac_addr
[2], mac_addr
[3], mac_addr
[4],
158 qede_add_mac_addr(void *arg
, const uint8_t *mac_addr
, const uint64_t flags
)
161 qede_add_mac_addr(void *arg
, const uint8_t *mac_addr
)
164 qede_mac_group_t
*rx_group
= (qede_mac_group_t
*)arg
;
165 qede_t
*qede
= rx_group
->qede
;
166 int ret
= DDI_SUCCESS
;
168 /* LINTED E_ARGUMENT_MISMATCH */
169 qede_info(qede
, " mac addr :" MAC_STRING
, MACTOSTR(mac_addr
));
171 mutex_enter(&qede
->gld_lock
);
172 if (qede
->qede_state
== QEDE_STATE_SUSPENDED
) {
173 mutex_exit(&qede
->gld_lock
);
176 ret
= qede_add_macaddr(qede
, (uint8_t *)mac_addr
);
178 mutex_exit(&qede
->gld_lock
);
185 qede_rem_macaddr(qede_t
*qede
, uint8_t *mac_addr
)
190 i
= qede_ucst_find(qede
, mac_addr
);
192 /* LINTED E_ARGUMENT_MISMATCH */
194 "mac addr not there to remove",
195 MAC_STRING
, MACTOSTR(mac_addr
));
198 if (qede
->ucst_mac
[i
].set
== 0) {
201 ret
= qede_set_mac_addr(qede
, (uint8_t *)mac_addr
, ECORE_FILTER_REMOVE
);
203 bzero(qede
->ucst_mac
[i
].mac_addr
.ether_addr_octet
,ETHERADDRL
);
204 qede
->ucst_mac
[i
].set
= 0;
207 /* LINTED E_ARGUMENT_MISMATCH */
208 qede_info(qede
, "mac addr remove failed",
209 MAC_STRING
, MACTOSTR(mac_addr
));
217 qede_rem_mac_addr(void *arg
, const uint8_t *mac_addr
)
219 qede_mac_group_t
*rx_group
= (qede_mac_group_t
*)arg
;
220 qede_t
*qede
= rx_group
->qede
;
221 int ret
= DDI_SUCCESS
;
223 /* LINTED E_ARGUMENT_MISMATCH */
224 qede_info(qede
, "mac addr remove:" MAC_STRING
, MACTOSTR(mac_addr
));
225 mutex_enter(&qede
->gld_lock
);
226 if (qede
->qede_state
== QEDE_STATE_SUSPENDED
) {
227 mutex_exit(&qede
->gld_lock
);
230 ret
= qede_rem_macaddr(qede
, (uint8_t *)mac_addr
);
231 mutex_exit(&qede
->gld_lock
);
237 qede_tx_ring_stat(mac_ring_driver_t rh
, uint_t stat
, uint64_t *val
)
241 qede_fastpath_t
*fp
= (qede_fastpath_t
*)rh
;
242 qede_tx_ring_t
*tx_ring
= fp
->tx_ring
[0];
243 qede_t
*qede
= fp
->qede
;
246 if (qede
->qede_state
== QEDE_STATE_SUSPENDED
)
250 case MAC_STAT_OBYTES
:
251 *val
= tx_ring
->tx_byte_count
;
254 case MAC_STAT_OPACKETS
:
255 *val
= tx_ring
->tx_pkt_count
;
268 qede_rx_ring_poll(void *arg
, int poll_bytes
, int poll_pkts
)
272 qede_rx_ring_poll(void *arg
, int poll_bytes
)
274 /* XXX pick a value at the moment */
277 qede_fastpath_t
*fp
= (qede_fastpath_t
*)arg
;
280 qede_t
*qede
= fp
->qede
;
282 if (poll_bytes
== 0) {
286 mutex_enter(&fp
->fp_lock
);
287 qede
->intrSbPollCnt
[fp
->vect_info
->vect_index
]++;
289 mp
= qede_process_fastpath(fp
, poll_bytes
, poll_pkts
, &work_done
);
291 fp
->rx_ring
->rx_poll_cnt
++;
292 } else if ((mp
== NULL
) && (work_done
== 0)) {
293 qede
->intrSbPollNoChangeCnt
[fp
->vect_info
->vect_index
]++;
296 mutex_exit(&fp
->fp_lock
);
302 qede_rx_ring_intr_enable(mac_ring_driver_t rh
)
305 qede_rx_ring_intr_enable(mac_intr_handle_t rh
)
308 qede_fastpath_t
*fp
= (qede_fastpath_t
*)rh
;
310 mutex_enter(&fp
->qede
->drv_lock
);
311 if (!fp
->sb_phys
&& (fp
->sb_dma_handle
== NULL
)) {
312 mutex_exit(&fp
->qede
->drv_lock
);
313 return (DDI_FAILURE
);
316 fp
->rx_ring
->intrEnableCnt
++;
317 qede_enable_hw_intr(fp
);
318 fp
->disabled_by_poll
= 0;
319 mutex_exit(&fp
->qede
->drv_lock
);
321 return (DDI_SUCCESS
);
326 qede_rx_ring_intr_disable(mac_ring_driver_t rh
)
329 qede_rx_ring_intr_disable(mac_intr_handle_t rh
)
332 qede_fastpath_t
*fp
= (qede_fastpath_t
*)rh
;
334 mutex_enter(&fp
->qede
->drv_lock
);
335 if (!fp
->sb_phys
&& (fp
->sb_dma_handle
== NULL
)) {
336 mutex_exit(&fp
->qede
->drv_lock
);
337 return (DDI_FAILURE
);
339 fp
->rx_ring
->intrDisableCnt
++;
340 qede_disable_hw_intr(fp
);
341 fp
->disabled_by_poll
= 1;
342 mutex_exit(&fp
->qede
->drv_lock
);
343 return (DDI_SUCCESS
);
347 qede_rx_ring_stat(mac_ring_driver_t rh
, uint_t stat
, uint64_t *val
)
352 qede_fastpath_t
*fp
= (qede_fastpath_t
*)rh
;
353 qede_t
*qede
= fp
->qede
;
354 qede_rx_ring_t
*rx_ring
= fp
->rx_ring
;
356 if (qede
->qede_state
== QEDE_STATE_SUSPENDED
) {
361 case MAC_STAT_RBYTES
:
362 *val
= rx_ring
->rx_byte_cnt
;
364 case MAC_STAT_IPACKETS
:
365 *val
= rx_ring
->rx_pkt_cnt
;
377 qede_get_global_ring_index(qede_t
*qede
, int gindex
, int rindex
)
380 qede_rx_ring_t
*rx_ring
;
383 for (i
= 0; i
< qede
->num_fp
; i
++) {
384 fp
= &qede
->fp_array
[i
];
385 rx_ring
= fp
->rx_ring
;
387 if (rx_ring
->group_index
== gindex
) {
399 qede_rx_ring_stop(mac_ring_driver_t rh
)
401 qede_fastpath_t
*fp
= (qede_fastpath_t
*)rh
;
402 qede_rx_ring_t
*rx_ring
= fp
->rx_ring
;
404 qede_print("!%s(%d): called", __func__
,fp
->qede
->instance
);
405 mutex_enter(&fp
->fp_lock
);
406 rx_ring
->mac_ring_started
= B_FALSE
;
407 mutex_exit(&fp
->fp_lock
);
411 qede_rx_ring_start(mac_ring_driver_t rh
, u64 mr_gen_num
)
413 qede_fastpath_t
*fp
= (qede_fastpath_t
*)rh
;
414 qede_rx_ring_t
*rx_ring
= fp
->rx_ring
;
416 qede_print("!%s(%d): called", __func__
,fp
->qede
->instance
);
417 mutex_enter(&fp
->fp_lock
);
418 rx_ring
->mr_gen_num
= mr_gen_num
;
419 rx_ring
->mac_ring_started
= B_TRUE
;
420 rx_ring
->intrDisableCnt
= 0;
421 rx_ring
->intrEnableCnt
= 0;
422 fp
->disabled_by_poll
= 0;
424 mutex_exit(&fp
->fp_lock
);
426 return (DDI_SUCCESS
);
429 /* Callback function from mac layer to register rings */
431 qede_fill_ring(void *arg
, mac_ring_type_t rtype
, const int group_index
,
432 const int ring_index
, mac_ring_info_t
*infop
, mac_ring_handle_t rh
)
434 qede_t
*qede
= (qede_t
*)arg
;
435 mac_intr_t
*mintr
= &infop
->mri_intr
;
438 case MAC_RING_TYPE_RX
: {
440 * Index passed as a param is the ring index within the
441 * given group index. If multiple groups are supported
442 * then need to search into all groups to find out the
443 * global ring index for the passed group relative
446 int global_ring_index
= qede_get_global_ring_index(qede
,
447 group_index
, ring_index
);
449 qede_rx_ring_t
*rx_ring
;
453 * global_ring_index < 0 means group index passed
454 * was registered by our driver
456 ASSERT(global_ring_index
>= 0);
459 cmn_err(CE_WARN
, "!rx ring(%d) ring handle NULL",
463 fp
= &qede
->fp_array
[global_ring_index
];
464 rx_ring
= fp
->rx_ring
;
467 rx_ring
->mac_ring_handle
= rh
;
469 qede_info(qede
, "rx_ring %d mac_ring_handle %p",
470 rx_ring
->rss_id
, rh
);
472 /* mri_driver passed as arg to mac_ring* callbacks */
473 infop
->mri_driver
= (mac_ring_driver_t
)fp
;
475 * mri_start callback will supply a mac rings generation
476 * number which is needed while indicating packets
477 * upstream via mac_ring_rx() call
479 infop
->mri_start
= qede_rx_ring_start
;
480 infop
->mri_stop
= qede_rx_ring_stop
;
481 infop
->mri_poll
= qede_rx_ring_poll
;
482 infop
->mri_stat
= qede_rx_ring_stat
;
484 mintr
->mi_handle
= (mac_intr_handle_t
)fp
;
485 mintr
->mi_enable
= qede_rx_ring_intr_enable
;
486 mintr
->mi_disable
= qede_rx_ring_intr_disable
;
487 if (qede
->intr_ctx
.intr_type_in_use
&
488 (DDI_INTR_TYPE_MSIX
| DDI_INTR_TYPE_MSI
)) {
489 mintr
->mi_ddi_handle
=
491 intr_hdl_array
[global_ring_index
+ qede
->num_hwfns
];
495 case MAC_RING_TYPE_TX
: {
497 qede_tx_ring_t
*tx_ring
;
500 ASSERT(ring_index
< qede
->num_fp
);
502 fp
= &qede
->fp_array
[ring_index
];
504 tx_ring
= fp
->tx_ring
[0];
505 tx_ring
->mac_ring_handle
= rh
;
506 qede_info(qede
, "tx_ring %d mac_ring_handle %p",
507 tx_ring
->tx_queue_index
, rh
);
508 infop
->mri_driver
= (mac_ring_driver_t
)fp
;
509 infop
->mri_start
= NULL
;
510 infop
->mri_stop
= NULL
;
511 infop
->mri_tx
= qede_ring_tx
;
512 infop
->mri_stat
= qede_tx_ring_stat
;
513 if (qede
->intr_ctx
.intr_type_in_use
&
514 (DDI_INTR_TYPE_MSIX
| DDI_INTR_TYPE_MSI
)) {
515 mintr
->mi_ddi_handle
=
517 intr_hdl_array
[ring_index
+ qede
->num_hwfns
];
527 * Callback function from mac layer to register group
530 qede_fill_group(void *arg
, mac_ring_type_t rtype
, const int index
,
531 mac_group_info_t
*infop
, mac_group_handle_t gh
)
533 qede_t
*qede
= (qede_t
*)arg
;
536 case MAC_RING_TYPE_RX
: {
537 qede_mac_group_t
*rx_group
;
539 rx_group
= &qede
->rx_groups
[index
];
540 rx_group
->group_handle
= gh
;
541 rx_group
->group_index
= index
;
542 rx_group
->qede
= qede
;
543 infop
->mgi_driver
= (mac_group_driver_t
)rx_group
;
544 infop
->mgi_start
= NULL
;
545 infop
->mgi_stop
= NULL
;
547 infop
->mgi_addvlan
= NULL
;
548 infop
->mgi_remvlan
= NULL
;
549 infop
->mgi_getsriov_info
= NULL
;
550 infop
->mgi_setmtu
= NULL
;
552 infop
->mgi_addmac
= qede_add_mac_addr
;
553 infop
->mgi_remmac
= qede_rem_mac_addr
;
554 infop
->mgi_count
= qede
->num_fp
;
557 infop
->mgi_flags
= MAC_GROUP_DEFAULT
;
563 case MAC_RING_TYPE_TX
: {
564 qede_mac_group_t
*tx_group
;
566 tx_group
= &qede
->tx_groups
[index
];
567 tx_group
->group_handle
= gh
;
568 tx_group
->group_index
= index
;
569 tx_group
->qede
= qede
;
571 infop
->mgi_driver
= (mac_group_driver_t
)tx_group
;
572 infop
->mgi_start
= NULL
;
573 infop
->mgi_stop
= NULL
;
574 infop
->mgi_addmac
= NULL
;
575 infop
->mgi_remmac
= NULL
;
577 infop
->mgi_addvlan
= NULL
;
578 infop
->mgi_remvlan
= NULL
;
579 infop
->mgi_setmtu
= NULL
;
580 infop
->mgi_getsriov_info
= NULL
;
583 infop
->mgi_count
= qede
->num_fp
;
587 infop
->mgi_flags
= MAC_GROUP_DEFAULT
;
599 qede_transceiver_info(void *arg
, uint_t id
, mac_transceiver_info_t
*infop
)
602 struct ecore_dev
*edev
= &qede
->edev
;
603 struct ecore_hwfn
*hwfn
;
604 struct ecore_ptt
*ptt
;
605 uint32_t transceiver_state
;
607 if (id
>= edev
->num_hwfns
|| arg
== NULL
|| infop
== NULL
)
610 hwfn
= &edev
->hwfns
[id
];
611 ptt
= ecore_ptt_acquire(hwfn
);
616 * Use the underlying raw API to get this information. While the
617 * ecore_phy routines have some ways of getting to this information, it
618 * ends up writing the raw data as ASCII characters which doesn't help
621 transceiver_state
= ecore_rd(hwfn
, ptt
, hwfn
->mcp_info
->port_addr
+
622 OFFSETOF(struct public_port
, transceiver_data
));
623 transceiver_state
= GET_FIELD(transceiver_state
, ETH_TRANSCEIVER_STATE
);
624 ecore_ptt_release(hwfn
, ptt
);
626 if ((transceiver_state
& ETH_TRANSCEIVER_STATE_PRESENT
) != 0) {
627 mac_transceiver_info_set_present(infop
, B_TRUE
);
629 * Based on our testing, the ETH_TRANSCEIVER_STATE_VALID flag is
630 * not set, so we cannot rely on it. Instead, we have found that
631 * the ETH_TRANSCEIVER_STATE_UPDATING will be set when we cannot
632 * use the transceiver.
634 if ((transceiver_state
& ETH_TRANSCEIVER_STATE_UPDATING
) != 0) {
635 mac_transceiver_info_set_usable(infop
, B_FALSE
);
637 mac_transceiver_info_set_usable(infop
, B_TRUE
);
640 mac_transceiver_info_set_present(infop
, B_FALSE
);
641 mac_transceiver_info_set_usable(infop
, B_FALSE
);
648 qede_transceiver_read(void *arg
, uint_t id
, uint_t page
, void *buf
,
649 size_t nbytes
, off_t offset
, size_t *nread
)
652 struct ecore_dev
*edev
= &qede
->edev
;
653 struct ecore_hwfn
*hwfn
;
655 struct ecore_ptt
*ptt
;
656 enum _ecore_status_t ret
;
658 if (id
>= edev
->num_hwfns
|| buf
== NULL
|| nbytes
== 0 || nread
== NULL
||
659 (page
!= 0xa0 && page
!= 0xa2) || offset
< 0)
663 * Both supported pages have a length of 256 bytes, ensure nothing asks
664 * us to go beyond that.
666 if (nbytes
> 256 || offset
>= 256 || (offset
+ nbytes
> 256)) {
670 hwfn
= &edev
->hwfns
[id
];
671 ptt
= ecore_ptt_acquire(hwfn
);
676 ret
= ecore_mcp_phy_sfp_read(hwfn
, ptt
, hwfn
->port_id
, page
, offset
,
678 ecore_ptt_release(hwfn
, ptt
);
679 if (ret
!= ECORE_SUCCESS
) {
689 qede_mac_stats(void * arg
,
693 qede_t
* qede
= (qede_t
*)arg
;
694 struct ecore_eth_stats vstats
;
695 struct ecore_dev
*edev
= &qede
->edev
;
696 struct qede_link_cfg lnkcfg
;
698 qede_fastpath_t
*fp
= &qede
->fp_array
[0];
699 qede_rx_ring_t
*rx_ring
;
700 qede_tx_ring_t
*tx_ring
;
702 if ((qede
== NULL
) || (value
== NULL
)) {
707 mutex_enter(&qede
->gld_lock
);
709 if(qede
->qede_state
!= QEDE_STATE_STARTED
) {
710 mutex_exit(&qede
->gld_lock
);
716 memset(&vstats
, 0, sizeof(struct ecore_eth_stats
));
717 ecore_get_vport_stats(edev
, &vstats
);
720 memset(&qede
->curcfg
, 0, sizeof(struct qede_link_cfg
));
721 qede_get_link_info(&edev
->hwfns
[0], &qede
->curcfg
);
727 case MAC_STAT_IFSPEED
:
728 *value
= (qede
->props
.link_speed
* 1000000ULL);
730 case MAC_STAT_MULTIRCV
:
731 *value
= vstats
.common
.rx_mcast_pkts
;
733 case MAC_STAT_BRDCSTRCV
:
734 *value
= vstats
.common
.rx_bcast_pkts
;
736 case MAC_STAT_MULTIXMT
:
737 *value
= vstats
.common
.tx_mcast_pkts
;
739 case MAC_STAT_BRDCSTXMT
:
740 *value
= vstats
.common
.tx_bcast_pkts
;
742 case MAC_STAT_NORCVBUF
:
743 *value
= vstats
.common
.no_buff_discards
;
745 case MAC_STAT_NOXMTBUF
:
748 case MAC_STAT_IERRORS
:
749 case ETHER_STAT_MACRCV_ERRORS
:
750 *value
= vstats
.common
.mac_filter_discards
+
751 vstats
.common
.packet_too_big_discard
+
752 vstats
.common
.rx_crc_errors
;
755 case MAC_STAT_OERRORS
:
758 case MAC_STAT_COLLISIONS
:
759 *value
= vstats
.bb
.tx_total_collisions
;
762 case MAC_STAT_RBYTES
:
763 *value
= vstats
.common
.rx_ucast_bytes
+
764 vstats
.common
.rx_mcast_bytes
+
765 vstats
.common
.rx_bcast_bytes
;
768 case MAC_STAT_IPACKETS
:
769 *value
= vstats
.common
.rx_ucast_pkts
+
770 vstats
.common
.rx_mcast_pkts
+
771 vstats
.common
.rx_bcast_pkts
;
774 case MAC_STAT_OBYTES
:
775 *value
= vstats
.common
.tx_ucast_bytes
+
776 vstats
.common
.tx_mcast_bytes
+
777 vstats
.common
.tx_bcast_bytes
;
780 case MAC_STAT_OPACKETS
:
781 *value
= vstats
.common
.tx_ucast_pkts
+
782 vstats
.common
.tx_mcast_pkts
+
783 vstats
.common
.tx_bcast_pkts
;
786 case ETHER_STAT_ALIGN_ERRORS
:
787 *value
= vstats
.common
.rx_align_errors
;
790 case ETHER_STAT_FCS_ERRORS
:
791 *value
= vstats
.common
.rx_crc_errors
;
794 case ETHER_STAT_FIRST_COLLISIONS
:
797 case ETHER_STAT_MULTI_COLLISIONS
:
800 case ETHER_STAT_DEFER_XMTS
:
803 case ETHER_STAT_TX_LATE_COLLISIONS
:
806 case ETHER_STAT_EX_COLLISIONS
:
809 case ETHER_STAT_MACXMT_ERRORS
:
813 case ETHER_STAT_CARRIER_ERRORS
:
816 case ETHER_STAT_TOOLONG_ERRORS
:
817 *value
= vstats
.common
.rx_oversize_packets
;
820 #if (MAC_VERSION > 1)
821 case ETHER_STAT_TOOSHORT_ERRORS
:
822 *value
= vstats
.common
.rx_undersize_packets
;
826 case ETHER_STAT_XCVR_ADDR
:
830 case ETHER_STAT_XCVR_ID
:
834 case ETHER_STAT_XCVR_INUSE
:
835 switch (qede
->props
.link_speed
) {
837 *value
= XCVR_UNDEFINED
;
840 #if (MAC_VERSION > 1)
841 case ETHER_STAT_CAP_10GFDX
:
845 case ETHER_STAT_CAP_100FDX
:
848 case ETHER_STAT_CAP_100HDX
:
851 case ETHER_STAT_CAP_ASMPAUSE
:
854 case ETHER_STAT_CAP_PAUSE
:
857 case ETHER_STAT_CAP_AUTONEG
:
861 #if (MAC_VERSION > 1)
862 case ETHER_STAT_CAP_REMFAULT
:
867 #if (MAC_VERSION > 1)
868 case ETHER_STAT_ADV_CAP_10GFDX
:
872 case ETHER_STAT_ADV_CAP_ASMPAUSE
:
876 case ETHER_STAT_ADV_CAP_PAUSE
:
880 case ETHER_STAT_ADV_CAP_AUTONEG
:
881 *value
= qede
->curcfg
.adv_capab
.autoneg
;
884 #if (MAC_VERSION > 1)
885 case ETHER_STAT_ADV_REMFAULT
:
890 case ETHER_STAT_LINK_AUTONEG
:
891 *value
= qede
->curcfg
.autoneg
;
894 case ETHER_STAT_LINK_DUPLEX
:
895 *value
= (qede
->props
.link_duplex
== DUPLEX_FULL
) ?
896 LINK_DUPLEX_FULL
: LINK_DUPLEX_HALF
;
899 * Supported speeds. These indicate what hardware is capable of.
901 case ETHER_STAT_CAP_1000HDX
:
902 *value
= qede
->curcfg
.supp_capab
.param_1000hdx
;
905 case ETHER_STAT_CAP_1000FDX
:
906 *value
= qede
->curcfg
.supp_capab
.param_1000fdx
;
909 case ETHER_STAT_CAP_10GFDX
:
910 *value
= qede
->curcfg
.supp_capab
.param_10000fdx
;
913 case ETHER_STAT_CAP_25GFDX
:
914 *value
= qede
->curcfg
.supp_capab
.param_25000fdx
;
917 case ETHER_STAT_CAP_40GFDX
:
918 *value
= qede
->curcfg
.supp_capab
.param_40000fdx
;
921 case ETHER_STAT_CAP_50GFDX
:
922 *value
= qede
->curcfg
.supp_capab
.param_50000fdx
;
925 case ETHER_STAT_CAP_100GFDX
:
926 *value
= qede
->curcfg
.supp_capab
.param_100000fdx
;
930 * Advertised speeds. These indicate what hardware is currently sending.
932 case ETHER_STAT_ADV_CAP_1000HDX
:
933 *value
= qede
->curcfg
.adv_capab
.param_1000hdx
;
936 case ETHER_STAT_ADV_CAP_1000FDX
:
937 *value
= qede
->curcfg
.adv_capab
.param_1000fdx
;
940 case ETHER_STAT_ADV_CAP_10GFDX
:
941 *value
= qede
->curcfg
.adv_capab
.param_10000fdx
;
944 case ETHER_STAT_ADV_CAP_25GFDX
:
945 *value
= qede
->curcfg
.adv_capab
.param_25000fdx
;
948 case ETHER_STAT_ADV_CAP_40GFDX
:
949 *value
= qede
->curcfg
.adv_capab
.param_40000fdx
;
952 case ETHER_STAT_ADV_CAP_50GFDX
:
953 *value
= qede
->curcfg
.adv_capab
.param_50000fdx
;
956 case ETHER_STAT_ADV_CAP_100GFDX
:
957 *value
= qede
->curcfg
.adv_capab
.param_100000fdx
;
964 mutex_exit(&qede
->gld_lock
);
968 /* (flag) TRUE = on, FALSE = off */
970 qede_mac_promiscuous(void *arg
,
973 qede_t
*qede
= (qede_t
*)arg
;
974 qede_print("!%s(%d): called", __func__
,qede
->instance
);
975 int ret
= DDI_SUCCESS
;
976 enum qede_filter_rx_mode_type mode
;
978 mutex_enter(&qede
->drv_lock
);
980 if (qede
->qede_state
== QEDE_STATE_SUSPENDED
) {
986 qede_info(qede
, "Entering promiscuous mode");
987 mode
= QEDE_FILTER_RX_MODE_PROMISC
;
988 qede
->params
.promisc_fl
= B_TRUE
;
990 qede_info(qede
, "Leaving promiscuous mode");
991 if(qede
->params
.multi_promisc_fl
== B_TRUE
) {
992 mode
= QEDE_FILTER_RX_MODE_MULTI_PROMISC
;
994 mode
= QEDE_FILTER_RX_MODE_REGULAR
;
996 qede
->params
.promisc_fl
= B_FALSE
;
999 ret
= qede_set_filter_rx_mode(qede
, mode
);
1002 mutex_exit(&qede
->drv_lock
);
1006 int qede_set_rx_mac_mcast(qede_t
*qede
, enum ecore_filter_opcode opcode
,
1007 uint8_t *mac
, int mc_cnt
)
1009 struct ecore_filter_mcast cmd
;
1011 memset(&cmd
, 0, sizeof(cmd
));
1012 cmd
.opcode
= opcode
;
1013 cmd
.num_mc_addrs
= mc_cnt
;
1015 for (i
= 0; i
< mc_cnt
; i
++, mac
+= ETH_ALLEN
) {
1016 COPY_ETH_ADDRESS(mac
, cmd
.mac
[i
]);
1020 return (ecore_filter_mcast_cmd(&qede
->edev
, &cmd
,
1021 ECORE_SPQ_MODE_CB
, NULL
));
1026 qede_set_filter_rx_mode(qede_t
* qede
, enum qede_filter_rx_mode_type type
)
1028 struct ecore_filter_accept_flags flg
;
1030 memset(&flg
, 0, sizeof(flg
));
1032 flg
.update_rx_mode_config
= 1;
1033 flg
.update_tx_mode_config
= 1;
1034 flg
.rx_accept_filter
= ECORE_ACCEPT_UCAST_MATCHED
|
1035 ECORE_ACCEPT_MCAST_MATCHED
| ECORE_ACCEPT_BCAST
;
1036 flg
.tx_accept_filter
= ECORE_ACCEPT_UCAST_MATCHED
|
1037 ECORE_ACCEPT_MCAST_MATCHED
| ECORE_ACCEPT_BCAST
;
1039 if (type
== QEDE_FILTER_RX_MODE_PROMISC
)
1040 flg
.rx_accept_filter
|= ECORE_ACCEPT_UCAST_UNMATCHED
|
1041 ECORE_ACCEPT_MCAST_UNMATCHED
;
1042 else if (type
== QEDE_FILTER_RX_MODE_MULTI_PROMISC
)
1043 flg
.rx_accept_filter
|= ECORE_ACCEPT_MCAST_UNMATCHED
;
1044 qede_info(qede
, "rx_mode rx_filter=0x%x tx_filter=0x%x type=0x%x\n",
1045 flg
.rx_accept_filter
, flg
.tx_accept_filter
, type
);
1046 return (ecore_filter_accept_cmd(&qede
->edev
, 0, flg
,
1047 0, /* update_accept_any_vlan */
1048 0, /* accept_any_vlan */
1049 ECORE_SPQ_MODE_CB
, NULL
));
1053 qede_multicast(qede_t
*qede
, boolean_t flag
, const uint8_t *ptr_mcaddr
)
1055 int i
, ret
= DDI_SUCCESS
;
1056 qede_mcast_list_entry_t
*ptr_mlist
;
1057 qede_mcast_list_entry_t
*ptr_entry
;
1059 unsigned char *mc_macs
, *tmpmc
;
1061 boolean_t mcmac_exists
= B_FALSE
;
1062 enum qede_filter_rx_mode_type mode
;
1065 cmn_err(CE_NOTE
, "Removing all multicast");
1068 "qede=%p %s multicast: %02x:%02x:%02x:%02x:%02x:%02x",
1069 qede
, (flag
) ? "Adding" : "Removing", ptr_mcaddr
[0],
1070 ptr_mcaddr
[1],ptr_mcaddr
[2],ptr_mcaddr
[3],ptr_mcaddr
[4],
1075 if (flag
&& (ptr_mcaddr
== NULL
)) {
1076 cmn_err(CE_WARN
, "ERROR: Multicast address not specified");
1081 /* exceeds addition of mcaddr above limit */
1082 if (flag
&& (qede
->mc_cnt
>= MAX_MC_SOFT_LIMIT
)) {
1083 qede_info(qede
, "Cannot add more than MAX_MC_SOFT_LIMIT");
1087 size
= MAX_MC_SOFT_LIMIT
* ETH_ALLEN
;
1089 mc_macs
= kmem_zalloc(size
, KM_NOSLEEP
);
1091 cmn_err(CE_WARN
, "ERROR: Failed to allocate for mc_macs");
1097 /* remove all multicast - as flag not set and mcaddr not specified*/
1098 if (!flag
&& (ptr_mcaddr
== NULL
)) {
1099 QEDE_LIST_FOR_EACH_ENTRY(ptr_entry
,
1100 &qede
->mclist
.head
, qede_mcast_list_entry_t
, mclist_entry
)
1102 if (ptr_entry
!= NULL
) {
1103 QEDE_LIST_REMOVE(&ptr_entry
->mclist_entry
,
1104 &qede
->mclist
.head
);
1105 kmem_free(ptr_entry
,
1106 sizeof (qede_mcast_list_entry_t
) + ETH_ALLEN
);
1110 ret
= qede_set_rx_mac_mcast(qede
,
1111 ECORE_FILTER_REMOVE
, mc_macs
, 1);
1116 QEDE_LIST_FOR_EACH_ENTRY(ptr_entry
,
1117 &qede
->mclist
.head
, qede_mcast_list_entry_t
, mclist_entry
)
1119 if ((ptr_entry
!= NULL
) &&
1120 IS_ETH_ADDRESS_EQUAL(ptr_mcaddr
, ptr_entry
->mac
)) {
1121 mcmac_exists
= B_TRUE
;
1125 if (flag
&& mcmac_exists
) {
1128 } else if (!flag
&& !mcmac_exists
) {
1134 ptr_entry
= kmem_zalloc((sizeof (qede_mcast_list_entry_t
) +
1135 ETH_ALLEN
), KM_NOSLEEP
);
1136 ptr_entry
->mac
= (uint8_t *)ptr_entry
+
1137 sizeof (qede_mcast_list_entry_t
);
1138 COPY_ETH_ADDRESS(ptr_mcaddr
, ptr_entry
->mac
);
1139 QEDE_LIST_ADD(&ptr_entry
->mclist_entry
, &qede
->mclist
.head
);
1141 QEDE_LIST_REMOVE(&ptr_entry
->mclist_entry
, &qede
->mclist
.head
);
1142 kmem_free(ptr_entry
, sizeof(qede_mcast_list_entry_t
) +
1147 QEDE_LIST_FOR_EACH_ENTRY(ptr_entry
, &qede
->mclist
.head
,
1148 qede_mcast_list_entry_t
, mclist_entry
) {
1149 COPY_ETH_ADDRESS(ptr_entry
->mac
, tmpmc
);
1153 qede
->mc_cnt
= mc_cnt
;
1155 ret
= qede_set_rx_mac_mcast(qede
, ECORE_FILTER_ADD
,
1156 (unsigned char *)mc_macs
, mc_cnt
);
1157 if ((qede
->params
.multi_promisc_fl
== B_TRUE
) &&
1158 (qede
->params
.promisc_fl
== B_FALSE
)) {
1159 mode
= QEDE_FILTER_RX_MODE_REGULAR
;
1160 ret
= qede_set_filter_rx_mode(qede
, mode
);
1162 qede
->params
.multi_promisc_fl
= B_FALSE
;
1164 if ((qede
->params
.multi_promisc_fl
== B_FALSE
) &&
1165 (qede
->params
.promisc_fl
= B_FALSE
)) {
1166 ret
= qede_set_filter_rx_mode(qede
,
1167 QEDE_FILTER_RX_MODE_MULTI_PROMISC
);
1169 qede
->params
.multi_promisc_fl
= B_TRUE
;
1170 qede_info(qede
, "mode is MULTI_PROMISC");
1173 kmem_free(mc_macs
, size
);
1174 qede_info(qede
, "multicast ret %d mc_cnt %d\n", ret
, qede
->mc_cnt
);
1179 * This function is used to enable or disable multicast packet reception for
1180 * particular multicast addresses.
1181 * (flag) TRUE = add, FALSE = remove
1184 qede_mac_multicast(void *arg
,
1186 const uint8_t * mcast_addr
)
1188 qede_t
*qede
= (qede_t
*)arg
;
1189 int ret
= DDI_SUCCESS
;
1192 mutex_enter(&qede
->gld_lock
);
1193 if(qede
->qede_state
!= QEDE_STATE_STARTED
) {
1194 mutex_exit(&qede
->gld_lock
);
1197 ret
= qede_multicast(qede
, flag
, mcast_addr
);
1199 mutex_exit(&qede
->gld_lock
);
1204 qede_clear_filters(qede_t
*qede
)
1208 if ((qede
->params
.promisc_fl
== B_TRUE
) ||
1209 (qede
->params
.multi_promisc_fl
== B_TRUE
)) {
1210 ret
= qede_set_filter_rx_mode(qede
,
1211 QEDE_FILTER_RX_MODE_REGULAR
);
1214 "qede_clear_filters failed to set rx_mode");
1217 for (i
=0; i
< qede
->ucst_total
; i
++)
1219 if (qede
->ucst_mac
[i
].set
) {
1220 qede_rem_macaddr(qede
,
1221 qede
->ucst_mac
[i
].mac_addr
.ether_addr_octet
);
1224 qede_multicast(qede
, B_FALSE
, NULL
);
1231 qede_mac_unicast(void *arg
,
1232 const uint8_t * mac_addr
)
1234 qede_t
*qede
= (qede_t
*)arg
;
1240 qede_mac_tx(void *arg
,
1243 qede_t
*qede
= (qede_t
*)arg
;
1244 qede_fastpath_t
*fp
= &qede
->fp_array
[0];
1246 mblk
= qede_ring_tx((void *)fp
, mblk
);
1250 #endif /* NO_CROSSBOW */
1253 static lb_property_t loopmodes
[] = {
1254 { normal
, "normal", QEDE_LOOP_NONE
},
1255 { internal
, "internal", QEDE_LOOP_INTERNAL
},
1256 { external
, "external", QEDE_LOOP_EXTERNAL
},
1263 static enum ioc_reply
1264 qede_set_loopback_mode(qede_t
*qede
, uint32_t mode
)
1267 struct ecore_dev
*edev
= &qede
->edev
;
1268 struct ecore_hwfn
*hwfn
;
1269 struct ecore_ptt
*ptt
= NULL
;
1270 struct ecore_mcp_link_params
*link_params
;
1272 hwfn
= &edev
->hwfns
[0];
1273 link_params
= ecore_mcp_get_link_params(hwfn
);
1274 ptt
= ecore_ptt_acquire(hwfn
);
1278 qede_info(qede
, "unknown loopback mode !!");
1279 ecore_ptt_release(hwfn
, ptt
);
1282 case QEDE_LOOP_NONE
:
1283 ecore_mcp_set_link(hwfn
, ptt
, 0);
1285 while (qede
->params
.link_state
&& i
< 5000) {
1291 link_params
->loopback_mode
= ETH_LOOPBACK_NONE
;
1292 qede
->loop_back_mode
= QEDE_LOOP_NONE
;
1293 ret
= ecore_mcp_set_link(hwfn
, ptt
, 1);
1294 ecore_ptt_release(hwfn
, ptt
);
1296 while (!qede
->params
.link_state
&& i
< 5000) {
1302 case QEDE_LOOP_INTERNAL
:
1303 qede_print("!%s(%d) : loopback mode (INTERNAL) is set!",
1304 __func__
, qede
->instance
);
1305 ecore_mcp_set_link(hwfn
, ptt
, 0);
1307 while(qede
->params
.link_state
&& i
< 5000) {
1312 link_params
->loopback_mode
= ETH_LOOPBACK_INT_PHY
;
1313 qede
->loop_back_mode
= QEDE_LOOP_INTERNAL
;
1314 ret
= ecore_mcp_set_link(hwfn
, ptt
, 1);
1315 ecore_ptt_release(hwfn
, ptt
);
1317 while(!qede
->params
.link_state
&& i
< 5000) {
1323 case QEDE_LOOP_EXTERNAL
:
1324 qede_print("!%s(%d) : External loopback mode is not supported",
1325 __func__
, qede
->instance
);
1326 ecore_ptt_release(hwfn
, ptt
);
1332 qede_ioctl_pcicfg_rd(qede_t
*qede
, u32 addr
, void *data
,
1335 u32 crb
, actual_crb
;
1337 int cap_offset
= 0, cap_id
= 0, next_cap
= 0;
1338 ddi_acc_handle_t pci_cfg_handle
= qede
->pci_cfg_handle
;
1339 qede_ioctl_data_t
* data1
= (qede_ioctl_data_t
*) data
;
1341 cap_offset
= pci_config_get8(pci_cfg_handle
, PCI_CONF_CAP_PTR
);
1342 while (cap_offset
!= 0) {
1343 /* Check for an invalid PCI read. */
1344 if (cap_offset
== PCI_EINVAL8
) {
1347 cap_id
= pci_config_get8(pci_cfg_handle
, cap_offset
);
1348 if (cap_id
== PCI_CAP_ID_PCI_E
) {
1349 /* PCIe expr capab struct found */
1352 next_cap
= pci_config_get8(pci_cfg_handle
,
1354 cap_offset
= next_cap
;
1360 ret
= pci_config_get8(qede
->pci_cfg_handle
, addr
);
1361 (void) memcpy(data
, &ret
, sizeof(uint8_t));
1364 ret
= pci_config_get16(qede
->pci_cfg_handle
, addr
);
1365 (void) memcpy(data
, &ret
, sizeof(uint16_t));
1368 ret
= pci_config_get32(qede
->pci_cfg_handle
, addr
);
1369 (void) memcpy(data
, &ret
, sizeof(uint32_t));
1372 cmn_err(CE_WARN
, "bad length for pci config read\n");
1379 qede_ioctl_pcicfg_wr(qede_t
*qede
, u32 addr
, void *data
,
1383 int cap_offset
= 0, cap_id
= 0, next_cap
= 0;
1384 qede_ioctl_data_t
* data1
= (qede_ioctl_data_t
*) data
;
1385 ddi_acc_handle_t pci_cfg_handle
= qede
->pci_cfg_handle
;
1387 cap_offset
= pci_config_get8(pci_cfg_handle
, PCI_CONF_CAP_PTR
);
1388 while (cap_offset
!= 0) {
1389 cap_id
= pci_config_get8(pci_cfg_handle
, cap_offset
);
1390 if (cap_id
== PCI_CAP_ID_PCI_E
) {
1391 /* PCIe expr capab struct found */
1394 next_cap
= pci_config_get8(pci_cfg_handle
,
1396 cap_offset
= next_cap
;
1403 pci_config_put8(qede
->pci_cfg_handle
, addr
,
1407 ret
= pci_config_get16(qede
->pci_cfg_handle
, addr
);
1408 ret
= ret
| *(uint16_t *)data1
->uabc
;
1410 pci_config_put16(qede
->pci_cfg_handle
, addr
,
1414 pci_config_put32(qede
->pci_cfg_handle
, addr
, *(uint32_t *)data1
->uabc
);
1424 qede_ioctl_rd_wr_reg(qede_t
*qede
, void *data
)
1426 struct ecore_hwfn
*p_hwfn
;
1427 struct ecore_dev
*edev
= &qede
->edev
;
1428 struct ecore_ptt
*ptt
;
1429 qede_ioctl_data_t
*data1
= (qede_ioctl_data_t
*)data
;
1431 uint8_t cmd
= (uint8_t) data1
->unused1
;
1432 uint32_t addr
= data1
->off
;
1433 uint32_t val
= *(uint32_t *)&data1
->uabc
[1];
1434 uint32_t hwfn_index
= *(uint32_t *)&data1
->uabc
[5];
1437 if (hwfn_index
> qede
->num_hwfns
) {
1438 cmn_err(CE_WARN
, "invalid hwfn index from application\n");
1441 p_hwfn
= &edev
->hwfns
[hwfn_index
];
1445 ret
= ecore_rd(p_hwfn
, p_hwfn
->p_main_ptt
, addr
);
1446 (void) memcpy(data1
->uabc
, &ret
, sizeof(uint32_t));
1449 case QEDE_REG_WRITE
:
1450 ecore_wr(p_hwfn
, p_hwfn
->p_main_ptt
, addr
, val
);
1455 "wrong command in register read/write from application\n");
1462 qede_ioctl_rd_wr_nvram(qede_t
*qede
, mblk_t
*mp
)
1464 qede_nvram_data_t
*data1
= (qede_nvram_data_t
*)(mp
->b_cont
->b_rptr
);
1465 qede_nvram_data_t
*data2
, *next_data
;
1466 struct ecore_dev
*edev
= &qede
->edev
;
1467 uint32_t ret
= 0, hdr_size
= 24, bytes_to_copy
, copy_len
= 0;
1468 uint32_t copy_len1
= 0;
1469 uint32_t addr
= data1
->off
;
1470 uint32_t size
= data1
->size
, i
, buf_size
;
1472 uint8_t *buf
, *tmp_buf
;
1475 cmd
= (uint8_t)data1
->unused1
;
1478 case QEDE_NVRAM_CMD_READ
:
1479 buf
= kmem_zalloc(size
, GFP_KERNEL
);
1481 cmn_err(CE_WARN
, "memory allocation failed"
1482 " in nvram read ioctl\n");
1483 return (DDI_FAILURE
);
1485 ret
= ecore_mcp_nvm_read(edev
, addr
, buf
, data1
->size
);
1487 copy_len
= (MBLKL(mp
->b_cont
)) - hdr_size
;
1488 if(copy_len
> size
) {
1489 (void) memcpy(data1
->uabc
, buf
, size
);
1490 kmem_free(buf
, size
);
1491 //OSAL_FREE(edev, buf);
1495 (void) memcpy(data1
->uabc
, buf
, copy_len
);
1496 bytes_to_copy
= size
- copy_len
;
1497 tmp_buf
= ((uint8_t *)buf
) + copy_len
;
1498 copy_len1
= copy_len
;
1503 copy_len
= MBLKL(mp1
);
1504 if(mp1
->b_cont
== NULL
) {
1505 copy_len
= MBLKL(mp1
) - 4;
1507 data2
= (qede_nvram_data_t
*)mp1
->b_rptr
;
1508 if (copy_len
> bytes_to_copy
) {
1509 (void) memcpy(data2
->uabc
, tmp_buf
,
1511 kmem_free(buf
, size
);
1512 //OSAL_FREE(edev, buf);
1515 (void) memcpy(data2
->uabc
, tmp_buf
, copy_len
);
1516 tmp_buf
= tmp_buf
+ copy_len
;
1517 copy_len
+= copy_len
;
1519 bytes_to_copy
= bytes_to_copy
- copy_len
;
1522 kmem_free(buf
, size
);
1523 //OSAL_FREE(edev, buf);
1526 case QEDE_NVRAM_CMD_WRITE
:
1527 cmd2
= (uint8_t )data1
->cmd2
;
1530 buf_size
= size
; //data1->buf_size;
1531 //buf_size = data1->buf_size;
1535 case START_NVM_WRITE
:
1536 buf
= kmem_zalloc(size
, GFP_KERNEL
);
1537 //buf = qede->reserved_buf;
1538 qede
->nvm_buf_size
= data1
->size
;
1541 "memory allocation failed in START_NVM_WRITE\n");
1544 qede
->nvm_buf_start
= buf
;
1546 "buf = %p, size = %x\n", qede
->nvm_buf_start
, size
);
1547 qede
->nvm_buf
= buf
;
1549 //tmp_buf = buf + addr;
1553 case ACCUMULATE_NVM_BUF
:
1554 tmp_buf
= qede
->nvm_buf
;
1555 copy_len
= MBLKL(mp
->b_cont
) - hdr_size
;
1556 if(copy_len
> buf_size
) {
1557 if (buf_size
< qede
->nvm_buf_size
) {
1558 (void) memcpy(tmp_buf
, data1
->uabc
, buf_size
);
1559 qede
->copy_len
= qede
->copy_len
+
1562 (void) memcpy(tmp_buf
,
1563 data1
->uabc
, qede
->nvm_buf_size
);
1565 qede
->copy_len
+ qede
->nvm_buf_size
;
1567 tmp_buf
= tmp_buf
+ buf_size
;
1568 qede
->nvm_buf
= tmp_buf
;
1569 //qede->copy_len = qede->copy_len + buf_size;
1571 "buf_size from app = %x\n", copy_len
);
1575 (void) memcpy(tmp_buf
, data1
->uabc
, copy_len
);
1576 tmp_buf
= tmp_buf
+ copy_len
;
1577 bytes_to_copy
= buf_size
- copy_len
;
1580 copy_len1
= copy_len
;
1583 copy_len
= MBLKL(mp1
);
1584 if (mp1
->b_cont
== NULL
) {
1585 copy_len
= MBLKL(mp1
) - 4;
1587 next_data
= (qede_nvram_data_t
*) mp1
->b_rptr
;
1588 if (copy_len
> bytes_to_copy
){
1589 (void) memcpy(tmp_buf
, next_data
->uabc
,
1591 qede
->copy_len
= qede
->copy_len
+
1596 (void) memcpy(tmp_buf
, next_data
->uabc
,
1598 qede
->copy_len
= qede
->copy_len
+ copy_len
;
1599 tmp_buf
= tmp_buf
+ copy_len
;
1600 copy_len
= copy_len1
+ copy_len
;
1601 bytes_to_copy
= bytes_to_copy
- copy_len
;
1604 qede
->nvm_buf
= tmp_buf
;
1608 case STOP_NVM_WRITE
:
1609 //qede->nvm_buf = tmp_buf;
1613 tmp_buf
= (uint8_t *)qede
->nvm_buf_start
;
1614 for(i
= 0; i
< size
; i
++){
1616 "buff (%d) : %d\n", i
, *tmp_buf
);
1623 case QEDE_NVRAM_CMD_PUT_FILE_DATA
:
1624 tmp_buf
= qede
->nvm_buf_start
;
1625 ret
= ecore_mcp_nvm_write(edev
, ECORE_PUT_FILE_DATA
,
1626 addr
, tmp_buf
, size
);
1627 kmem_free(qede
->nvm_buf_start
, size
);
1628 //OSAL_FREE(edev, tmp_buf);
1629 cmn_err(CE_NOTE
, "total size = %x, copied size = %x\n",
1630 qede
->nvm_buf_size
, qede
->copy_len
);
1632 qede
->nvm_buf
= NULL
;
1633 qede
->nvm_buf_start
= NULL
;
1637 case QEDE_NVRAM_CMD_SET_SECURE_MODE
:
1638 ret
= ecore_mcp_nvm_set_secure_mode(edev
, addr
);
1641 case QEDE_NVRAM_CMD_DEL_FILE
:
1642 ret
= ecore_mcp_nvm_del_file(edev
, addr
);
1645 case QEDE_NVRAM_CMD_PUT_FILE_BEGIN
:
1646 ret
= ecore_mcp_nvm_put_file_begin(edev
, addr
);
1649 case QEDE_NVRAM_CMD_GET_NVRAM_RESP
:
1650 buf
= kmem_zalloc(size
, KM_SLEEP
);
1651 ret
= ecore_mcp_nvm_resp(edev
, buf
);
1652 (void)memcpy(data1
->uabc
, buf
, size
);
1653 kmem_free(buf
, size
);
1658 "wrong command in NVRAM read/write from application\n");
1661 return (DDI_SUCCESS
);
1665 qede_get_func_info(qede_t
*qede
, void *data
)
1667 qede_link_output_t link_op
;
1668 qede_func_info_t func_info
;
1669 qede_ioctl_data_t
*data1
= (qede_ioctl_data_t
*)data
;
1670 struct ecore_dev
*edev
= &qede
->edev
;
1671 struct ecore_hwfn
*hwfn
;
1672 struct ecore_mcp_link_params params
;
1673 struct ecore_mcp_link_state link
;
1675 hwfn
= &edev
->hwfns
[0];
1678 cmn_err(CE_WARN
, "(%s) : cannot acquire hwfn\n",
1680 return (DDI_FAILURE
);
1682 memcpy(¶ms
, &hwfn
->mcp_info
->link_input
, sizeof(params
));
1683 memcpy(&link
, &hwfn
->mcp_info
->link_output
, sizeof(link
));
1686 link_op
.link_up
= true;
1689 link_op
.supported_caps
= SUPPORTED_FIBRE
;
1690 if(params
.speed
.autoneg
) {
1691 link_op
.supported_caps
|= SUPPORTED_Autoneg
;
1694 if(params
.pause
.autoneg
||
1695 (params
.pause
.forced_rx
&& params
.pause
.forced_tx
)) {
1696 link_op
.supported_caps
|= SUPPORTED_Asym_Pause
;
1699 if (params
.pause
.autoneg
|| params
.pause
.forced_rx
||
1700 params
.pause
.forced_tx
) {
1701 link_op
.supported_caps
|= SUPPORTED_Pause
;
1704 if (params
.speed
.advertised_speeds
&
1705 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G
) {
1706 link_op
.supported_caps
|= SUPPORTED_1000baseT_Half
|
1707 SUPPORTED_1000baseT_Full
;
1710 if (params
.speed
.advertised_speeds
&
1711 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G
) {
1712 link_op
.supported_caps
|= SUPPORTED_10000baseKR_Full
;
1715 if (params
.speed
.advertised_speeds
&
1716 NVM_CFG1_PORT_DRV_LINK_SPEED_40G
) {
1717 link_op
.supported_caps
|= SUPPORTED_40000baseLR4_Full
;
1720 link_op
.advertised_caps
= link_op
.supported_caps
;
1723 link_op
.speed
= link
.speed
;
1728 link_op
.duplex
= DUPLEX_FULL
;
1729 link_op
.port
= PORT_FIBRE
;
1731 link_op
.autoneg
= params
.speed
.autoneg
;
1733 /* Link partner capabilities */
1734 if (link
.partner_adv_speed
&
1735 ECORE_LINK_PARTNER_SPEED_1G_HD
) {
1736 link_op
.lp_caps
|= SUPPORTED_1000baseT_Half
;
1739 if (link
.partner_adv_speed
&
1740 ECORE_LINK_PARTNER_SPEED_1G_FD
) {
1741 link_op
.lp_caps
|= SUPPORTED_1000baseT_Full
;
1744 if (link
.partner_adv_speed
&
1745 ECORE_LINK_PARTNER_SPEED_10G
) {
1746 link_op
.lp_caps
|= SUPPORTED_10000baseKR_Full
;
1749 if (link
.partner_adv_speed
&
1750 ECORE_LINK_PARTNER_SPEED_20G
) {
1751 link_op
.lp_caps
|= SUPPORTED_20000baseKR2_Full
;
1754 if (link
.partner_adv_speed
&
1755 ECORE_LINK_PARTNER_SPEED_40G
) {
1756 link_op
.lp_caps
|= SUPPORTED_40000baseLR4_Full
;
1759 if (link
.an_complete
) {
1760 link_op
.lp_caps
|= SUPPORTED_Autoneg
;
1763 if (link
.partner_adv_pause
) {
1764 link_op
.lp_caps
|= SUPPORTED_Pause
;
1767 if (link
.partner_adv_pause
== ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE
||
1768 link
.partner_adv_pause
== ECORE_LINK_PARTNER_BOTH_PAUSE
) {
1769 link_op
.lp_caps
|= SUPPORTED_Asym_Pause
;
1772 func_info
.supported
= link_op
.supported_caps
;
1773 func_info
.advertising
= link_op
.advertised_caps
;
1774 func_info
.speed
= link_op
.speed
;
1775 func_info
.duplex
= link_op
.duplex
;
1776 func_info
.port
= qede
->pci_func
& 0x1;
1777 func_info
.autoneg
= link_op
.autoneg
;
1779 (void) memcpy(data1
->uabc
, &func_info
, sizeof(qede_func_info_t
));
1785 qede_do_ioctl(qede_t
*qede
, queue_t
*q
, mblk_t
*mp
)
1787 qede_ioctl_data_t
*up_data
;
1788 qede_driver_info_t driver_info
;
1789 struct ecore_dev
*edev
= &qede
->edev
;
1790 struct ecore_hwfn
*hwfn
;
1791 struct ecore_ptt
*ptt
= NULL
;
1792 struct mcp_file_att attrib
;
1793 uint32_t flash_size
;
1794 uint32_t mcp_resp
, mcp_param
, txn_size
;
1795 uint32_t cmd
, size
, ret
= 0;
1802 up_data
= (qede_ioctl_data_t
*)(mp
->b_cont
->b_rptr
);
1806 size
= up_data
->size
;
1810 hwfn
= &edev
->hwfns
[0];
1811 ptt
= ecore_ptt_acquire(hwfn
);
1813 snprintf(driver_info
.drv_name
, MAX_QEDE_NAME_LEN
, "%s", "qede");
1814 snprintf(driver_info
.drv_version
, QEDE_STR_SIZE
,
1815 "v:%s", qede
->version
);
1816 snprintf(driver_info
.mfw_version
, QEDE_STR_SIZE
,
1817 "%s", qede
->versionMFW
);
1818 snprintf(driver_info
.stormfw_version
, QEDE_STR_SIZE
,
1819 "%s", qede
->versionFW
);
1820 snprintf(driver_info
.bus_info
, QEDE_STR_SIZE
,
1821 "%s", qede
->bus_dev_func
);
1825 * calling ecore_mcp_nvm_rd_cmd to find the flash length, i
1826 * 0x08 is equivalent of NVM_TYPE_MFW_TRACE1
1828 ecore_mcp_get_flash_size(hwfn
, ptt
, &flash_size
);
1829 driver_info
.eeprom_dump_len
= flash_size
;
1830 (void) memcpy(up_data
->uabc
, &driver_info
,
1831 sizeof (qede_driver_info_t
));
1832 up_data
->size
= sizeof (qede_driver_info_t
);
1834 ecore_ptt_release(hwfn
, ptt
);
1837 case QEDE_RD_PCICFG
:
1838 ret
= qede_ioctl_pcicfg_rd(qede
, off
, up_data
->uabc
, size
);
1841 case QEDE_WR_PCICFG
:
1842 ret
= qede_ioctl_pcicfg_wr(qede
, off
, up_data
, size
);
1846 ret
= qede_ioctl_rd_wr_reg(qede
, (void *)up_data
);
1850 ret
= qede_ioctl_rd_wr_nvram(qede
, mp1
);
1853 case QEDE_FUNC_INFO
:
1854 ret
= qede_get_func_info(qede
, (void *)up_data
);
1858 snprintf(mac_addr
, sizeof(mac_addr
),
1859 "%02x:%02x:%02x:%02x:%02x:%02x",
1860 qede
->ether_addr
[0], qede
->ether_addr
[1],
1861 qede
->ether_addr
[2], qede
->ether_addr
[3],
1862 qede
->ether_addr
[4], qede
->ether_addr
[5]);
1863 (void) memcpy(up_data
->uabc
, &mac_addr
, sizeof(mac_addr
));
1867 //if (cmd == QEDE_RW_NVRAM) {
1868 // miocack (q, mp, (sizeof(qede_ioctl_data_t)), 0);
1869 // return IOC_REPLY;
1871 miocack (q
, mp
, (sizeof(qede_ioctl_data_t
)), ret
);
1872 //miocack (q, mp, 0, ret);
1877 qede_ioctl(qede_t
*qede
, int cmd
, queue_t
*q
, mblk_t
*mp
)
1883 (void) qede_do_ioctl(qede
, q
, mp
);
1886 cmn_err(CE_WARN
, "qede ioctl command %x not supported\n", cmd
);
1892 qede_loopback_ioctl(qede_t
*qede
, queue_t
*wq
, mblk_t
*mp
,
1893 struct iocblk
*iocp
)
1895 lb_info_sz_t
*lb_info_size
;
1896 lb_property_t
*lb_prop
;
1901 * Validate format of ioctl
1903 if(mp
->b_cont
== NULL
) {
1907 cmd
= iocp
->ioc_cmd
;
1911 qede_print("!%s(%d): unknown ioctl command %x\n",
1912 __func__
, qede
->instance
, cmd
);
1914 case LB_GET_INFO_SIZE
:
1915 if (iocp
->ioc_count
!= sizeof(lb_info_sz_t
)) {
1916 qede_info(qede
, "error: ioc_count %d, sizeof %d",
1917 iocp
->ioc_count
, sizeof(lb_info_sz_t
));
1920 lb_info_size
= (void *)mp
->b_cont
->b_rptr
;
1921 *lb_info_size
= sizeof(loopmodes
);
1924 if (iocp
->ioc_count
!= sizeof (loopmodes
)) {
1925 qede_info(qede
, "error: iocp->ioc_count %d, sizepof %d",
1926 iocp
->ioc_count
, sizeof (loopmodes
));
1929 lb_prop
= (void *)mp
->b_cont
->b_rptr
;
1930 bcopy(loopmodes
, lb_prop
, sizeof (loopmodes
));
1933 if (iocp
->ioc_count
!= sizeof (uint32_t)) {
1934 qede_info(qede
, "iocp->ioc_count %d, sizeof : %d\n",
1935 iocp
->ioc_count
, sizeof (uint32_t));
1938 lb_mode
= (void *)mp
->b_cont
->b_rptr
;
1939 *lb_mode
= qede
->loop_back_mode
;
1942 if (iocp
->ioc_count
!= sizeof (uint32_t)) {
1943 qede_info(qede
, "iocp->ioc_count %d, sizeof : %d\n",
1944 iocp
->ioc_count
, sizeof (uint32_t));
1947 lb_mode
= (void *)mp
->b_cont
->b_rptr
;
1948 return (qede_set_loopback_mode(qede
,*lb_mode
));
1953 qede_mac_ioctl(void * arg
,
1958 qede_t
* qede
= (qede_t
*)arg
;
1959 struct iocblk
*iocp
= (struct iocblk
*) (uintptr_t)mp
->b_rptr
;
1960 enum ioc_reply status
= IOC_DONE
;
1961 boolean_t need_privilege
= B_TRUE
;
1963 iocp
->ioc_error
= 0;
1964 cmd
= iocp
->ioc_cmd
;
1966 mutex_enter(&qede
->drv_lock
);
1967 if ((qede
->qede_state
== QEDE_STATE_SUSPENDING
) ||
1968 (qede
->qede_state
== QEDE_STATE_SUSPENDED
)) {
1969 mutex_exit(&qede
->drv_lock
);
1970 miocnak(wq
, mp
, 0, EINVAL
);
1977 case LB_GET_INFO_SIZE
:
1980 need_privilege
= B_FALSE
;
1984 qede_print("!%s(%d) unknown ioctl command %x\n",
1985 __func__
, qede
->instance
, cmd
);
1986 miocnak(wq
, mp
, 0, EINVAL
);
1987 mutex_exit(&qede
->drv_lock
);
1991 if(need_privilege
) {
1992 err
= secpolicy_net_config(iocp
->ioc_cr
, B_FALSE
);
1994 qede_info(qede
, "secpolicy() failed");
1995 miocnak(wq
, mp
, 0, err
);
1996 mutex_exit(&qede
->drv_lock
);
2003 qede_print("!%s(%d) : unknown ioctl command %x\n",
2004 __func__
, qede
->instance
, cmd
);
2006 mutex_exit(&qede
->drv_lock
);
2008 case LB_GET_INFO_SIZE
:
2012 status
= qede_loopback_ioctl(qede
, wq
, mp
, iocp
);
2015 qede_ioctl(qede
, cmd
, wq
, mp
);
2022 qede_print("!%s(%d) : invalid status from ioctl",
2023 __func__
,qede
->instance
);
2027 * OK, Reply already sent
2032 mp
->b_datap
->db_type
= iocp
->ioc_error
== 0 ?
2033 M_IOCACK
: M_IOCNAK
;
2037 mutex_exit(&qede
->drv_lock
);
2038 //miocack(wq, mp, 0, 0);
2039 miocnak(wq
, mp
, 0, iocp
->ioc_error
== 0 ?
2040 EINVAL
: iocp
->ioc_error
);
2043 mutex_exit(&qede
->drv_lock
);
2046 extern ddi_dma_attr_t qede_buf2k_dma_attr_txbuf
;
2047 extern ddi_dma_attr_t qede_dma_attr_rxbuf
;
2048 extern ddi_dma_attr_t qede_dma_attr_desc
;
2051 qede_mac_get_capability(void *arg
,
2052 mac_capab_t capability
,
2055 qede_t
* qede
= (qede_t
*)arg
;
2056 uint32_t *txflags
= cap_data
;
2057 boolean_t ret
= B_FALSE
;
2059 switch (capability
) {
2060 case MAC_CAPAB_HCKSUM
: {
2061 u32
*tx_flags
= cap_data
;
2063 * Check if checksum is enabled on
2064 * tx and advertise the cksum capab
2065 * to mac layer accordingly. On Rx
2066 * side checksummed packets are
2069 qede_info(qede
, "%s tx checksum offload",
2070 (qede
->checksum
== DEFAULT_CKSUM_OFFLOAD
) ?
2074 if (qede
->checksum
!= DEFAULT_CKSUM_OFFLOAD
) {
2079 * Hardware does not support ICMPv6 checksumming. Right now the
2080 * GLDv3 doesn't provide us a way to specify that we don't
2081 * support that. As such, we cannot indicate
2082 * HCKSUM_INET_FULL_V6.
2085 *tx_flags
= HCKSUM_INET_FULL_V4
|
2090 case MAC_CAPAB_LSO
: {
2091 mac_capab_lso_t
*cap_lso
= (mac_capab_lso_t
*)cap_data
;
2093 qede_info(qede
, "%s large segmentation offload",
2094 qede
->lso_enable
? "Enabling": "Disabling");
2095 if (qede
->lso_enable
) {
2096 cap_lso
->lso_flags
= LSO_TX_BASIC_TCP_IPV4
;
2097 cap_lso
->lso_basic_tcp_ipv4
.lso_max
= QEDE_LSO_MAXLEN
;
2102 case MAC_CAPAB_RINGS
: {
2104 mac_capab_rings_t
*cap_rings
= cap_data
;
2106 cap_rings
->mr_version
= MAC_RINGS_VERSION_1
;
2109 switch (cap_rings
->mr_type
) {
2110 case MAC_RING_TYPE_RX
:
2112 cap_rings
->mr_flags
= MAC_RINGS_VLAN_TRANSPARENT
;
2114 cap_rings
->mr_group_type
= MAC_GROUP_TYPE_STATIC
;
2115 //cap_rings->mr_rnum = 1; /* qede variable */
2116 cap_rings
->mr_rnum
= qede
->num_fp
; /* qede variable */
2117 cap_rings
->mr_gnum
= 1;
2118 cap_rings
->mr_rget
= qede_fill_ring
;
2119 cap_rings
->mr_gget
= qede_fill_group
;
2120 cap_rings
->mr_gaddring
= NULL
;
2121 cap_rings
->mr_gremring
= NULL
;
2123 cap_rings
->mr_ggetringtc
= NULL
;
2127 case MAC_RING_TYPE_TX
:
2129 cap_rings
->mr_flags
= MAC_RINGS_VLAN_TRANSPARENT
;
2131 cap_rings
->mr_group_type
= MAC_GROUP_TYPE_STATIC
;
2132 //cap_rings->mr_rnum = 1;
2133 cap_rings
->mr_rnum
= qede
->num_fp
;
2134 cap_rings
->mr_gnum
= 0;
2135 cap_rings
->mr_rget
= qede_fill_ring
;
2136 cap_rings
->mr_gget
= qede_fill_group
;
2137 cap_rings
->mr_gaddring
= NULL
;
2138 cap_rings
->mr_gremring
= NULL
;
2140 cap_rings
->mr_ggetringtc
= NULL
;
2149 break; /* CASE MAC_CAPAB_RINGS */
2152 case MAC_CAPAB_TRANSCEIVER
: {
2153 mac_capab_transceiver_t
*mct
= cap_data
;
2156 mct
->mct_ntransceivers
= qede
->edev
.num_hwfns
;
2157 mct
->mct_info
= qede_transceiver_info
;
2158 mct
->mct_read
= qede_transceiver_read
;
2172 qede_configure_link(qede_t
*qede
, bool op
);
2175 qede_mac_set_property(void * arg
,
2176 const char * pr_name
,
2177 mac_prop_id_t pr_num
,
2179 const void * pr_val
)
2181 qede_t
* qede
= (qede_t
*)arg
;
2182 struct ecore_mcp_link_params
*link_params
;
2183 struct ecore_dev
*edev
= &qede
->edev
;
2184 struct ecore_hwfn
*hwfn
;
2188 mutex_enter(&qede
->gld_lock
);
2192 bcopy(pr_val
, &option
, sizeof (option
));
2194 if(option
== qede
->mtu
) {
2198 if ((option
!= DEFAULT_JUMBO_MTU
) &&
2199 (option
!= DEFAULT_MTU
)) {
2203 if(qede
->qede_state
== QEDE_STATE_STARTED
) {
2208 ret_val
= mac_maxsdu_update(qede
->mac_handle
, qede
->mtu
);
2212 if (option
== DEFAULT_JUMBO_MTU
) {
2213 qede
->jumbo_enable
= B_TRUE
;
2215 qede
->jumbo_enable
= B_FALSE
;
2218 hwfn
= ECORE_LEADING_HWFN(edev
);
2219 hwfn
->hw_info
.mtu
= qede
->mtu
;
2220 ret_val
= ecore_mcp_ov_update_mtu(hwfn
,
2223 if (ret_val
!= ECORE_SUCCESS
) {
2224 qede_print("!%s(%d): MTU change %d option %d"
2226 __func__
,qede
->instance
, qede
->mtu
, option
);
2229 qede_print("!%s(%d): MTU changed %d MTU option"
2231 __func__
,qede
->instance
, qede
->mtu
,
2232 option
, hwfn
->hw_info
.mtu
);
2236 case MAC_PROP_EN_10GFDX_CAP
:
2237 hwfn
= &edev
->hwfns
[0];
2238 link_params
= ecore_mcp_get_link_params(hwfn
);
2239 if (*(uint8_t *) pr_val
) {
2240 link_params
->speed
.autoneg
= 0;
2241 link_params
->speed
.forced_speed
= 10000;
2242 link_params
->speed
.advertised_speeds
=
2243 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G
;
2244 qede
->forced_speed_10G
= *(uint8_t *)pr_val
;
2248 &qede
->link_input_params
.default_link_params
,
2249 sizeof (struct ecore_mcp_link_params
));
2250 qede
->forced_speed_10G
= *(uint8_t *)pr_val
;
2252 if (qede
->qede_state
== QEDE_STATE_STARTED
) {
2253 qede_configure_link(qede
,1);
2255 mutex_exit(&qede
->gld_lock
);
2263 mutex_exit(&qede
->gld_lock
);
2268 qede_mac_stop(void *arg
)
2270 qede_t
*qede
= (qede_t
*)arg
;
2273 qede_print("!%s(%d): called",
2274 __func__
,qede
->instance
);
2275 mutex_enter(&qede
->drv_lock
);
2276 status
= qede_stop(qede
);
2277 if (status
!= DDI_SUCCESS
) {
2278 qede_print("!%s(%d): qede_stop "
2280 __func__
,qede
->instance
);
2283 mac_link_update(qede
->mac_handle
, LINK_STATE_UNKNOWN
);
2284 mutex_exit(&qede
->drv_lock
);
2288 qede_mac_start(void *arg
)
2290 qede_t
*qede
= (qede_t
*)arg
;
2293 qede_print("!%s(%d): called", __func__
,qede
->instance
);
2294 if (!mutex_tryenter(&qede
->drv_lock
)) {
2298 if (qede
->qede_state
== QEDE_STATE_SUSPENDED
) {
2299 mutex_exit(&qede
->drv_lock
);
2303 status
= qede_start(qede
);
2304 if (status
!= DDI_SUCCESS
) {
2305 mutex_exit(&qede
->drv_lock
);
2309 mutex_exit(&qede
->drv_lock
);
2311 #ifdef DBLK_DMA_PREMAP
2312 qede
->pm_handle
= mac_pmh_tx_get(qede
->mac_handle
);
2318 qede_mac_get_property(void *arg
,
2319 const char *pr_name
,
2320 mac_prop_id_t pr_num
,
2324 qede_t
*qede
= (qede_t
*)arg
;
2325 struct ecore_dev
*edev
= &qede
->edev
;
2326 link_state_t link_state
;
2327 link_duplex_t link_duplex
;
2328 uint64_t link_speed
;
2329 link_flowctrl_t link_flowctrl
;
2330 struct qede_link_cfg link_cfg
;
2331 qede_link_cfg_t
*hw_cfg
= &qede
->hwinit
;
2334 memset(&link_cfg
, 0, sizeof (struct qede_link_cfg
));
2335 qede_get_link_info(&edev
->hwfns
[0], &link_cfg
);
2343 ASSERT(pr_valsize
>= sizeof(uint32_t));
2344 bcopy(&qede
->mtu
, pr_val
, sizeof(uint32_t));
2347 case MAC_PROP_DUPLEX
:
2349 ASSERT(pr_valsize
>= sizeof(link_duplex_t
));
2350 link_duplex
= (qede
->props
.link_duplex
) ?
2351 LINK_DUPLEX_FULL
: LINK_DUPLEX_HALF
;
2352 bcopy(&link_duplex
, pr_val
, sizeof(link_duplex_t
));
2355 case MAC_PROP_SPEED
:
2357 ASSERT(pr_valsize
>= sizeof(link_speed
));
2359 link_speed
= (qede
->props
.link_speed
* 1000000ULL);
2360 bcopy(&link_speed
, pr_val
, sizeof(link_speed
));
2363 case MAC_PROP_STATUS
:
2365 ASSERT(pr_valsize
>= sizeof(link_state_t
));
2367 link_state
= (qede
->params
.link_state
) ?
2368 LINK_STATE_UP
: LINK_STATE_DOWN
;
2369 bcopy(&link_state
, pr_val
, sizeof(link_state_t
));
2370 qede_info(qede
, "mac_prop_status %d\n", link_state
);
2373 case MAC_PROP_AUTONEG
:
2375 *(uint8_t *)pr_val
= link_cfg
.autoneg
;
2378 case MAC_PROP_FLOWCTRL
:
2380 ASSERT(pr_valsize
>= sizeof(link_flowctrl_t
));
2383 * illumos does not have the notion of LINK_FLOWCTRL_AUTO at this time.
2386 if (link_cfg
.pause_cfg
& QEDE_LINK_PAUSE_AUTONEG_ENABLE
) {
2387 link_flowctrl
= LINK_FLOWCTRL_AUTO
;
2391 if (!(link_cfg
.pause_cfg
& QEDE_LINK_PAUSE_RX_ENABLE
) &&
2392 !(link_cfg
.pause_cfg
& QEDE_LINK_PAUSE_TX_ENABLE
)) {
2393 link_flowctrl
= LINK_FLOWCTRL_NONE
;
2395 if ((link_cfg
.pause_cfg
& QEDE_LINK_PAUSE_RX_ENABLE
) &&
2396 !(link_cfg
.pause_cfg
& QEDE_LINK_PAUSE_TX_ENABLE
)) {
2397 link_flowctrl
= LINK_FLOWCTRL_RX
;
2399 if (!(link_cfg
.pause_cfg
& QEDE_LINK_PAUSE_RX_ENABLE
) &&
2400 (link_cfg
.pause_cfg
& QEDE_LINK_PAUSE_TX_ENABLE
)) {
2401 link_flowctrl
= LINK_FLOWCTRL_TX
;
2403 if ((link_cfg
.pause_cfg
& QEDE_LINK_PAUSE_RX_ENABLE
) &&
2404 (link_cfg
.pause_cfg
& QEDE_LINK_PAUSE_TX_ENABLE
)) {
2405 link_flowctrl
= LINK_FLOWCTRL_BI
;
2408 bcopy(&link_flowctrl
, pr_val
, sizeof (link_flowctrl_t
));
2411 case MAC_PROP_ADV_10GFDX_CAP
:
2412 *(uint8_t *)pr_val
= link_cfg
.adv_capab
.param_10000fdx
;
2415 case MAC_PROP_EN_10GFDX_CAP
:
2416 *(uint8_t *)pr_val
= qede
->forced_speed_10G
;
2419 case MAC_PROP_PRIVATE
:
2429 qede_mac_property_info(void *arg
,
2430 const char *pr_name
,
2431 mac_prop_id_t pr_num
,
2432 mac_prop_info_handle_t prh
)
2434 qede_t
*qede
= (qede_t
*)arg
;
2435 qede_link_props_t
*def_cfg
= &qede_def_link_props
;
2436 link_flowctrl_t link_flowctrl
;
2442 case MAC_PROP_STATUS
:
2443 case MAC_PROP_SPEED
:
2444 case MAC_PROP_DUPLEX
:
2445 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
2450 mac_prop_info_set_range_uint32(prh
,
2455 case MAC_PROP_AUTONEG
:
2457 mac_prop_info_set_default_uint8(prh
, def_cfg
->autoneg
);
2460 case MAC_PROP_FLOWCTRL
:
2462 if (!def_cfg
->pause
) {
2463 link_flowctrl
= LINK_FLOWCTRL_NONE
;
2465 link_flowctrl
= LINK_FLOWCTRL_BI
;
2468 mac_prop_info_set_default_link_flowctrl(prh
, link_flowctrl
);
2471 case MAC_PROP_EN_10GFDX_CAP
:
2472 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_RW
);
2475 case MAC_PROP_ADV_10GFDX_CAP
:
2476 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
2480 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
2486 static mac_callbacks_t qede_callbacks
=
2490 /* | MC_RESOURCES */
2499 qede_mac_promiscuous
,
2507 NULL
, /* qede_mac_resources, */
2509 qede_mac_get_capability
,
2512 qede_mac_set_property
,
2513 qede_mac_get_property
,
2515 qede_mac_property_info
2520 qede_gld_init(qede_t
*qede
)
2523 mac_register_t
*macp
;
2525 macp
= mac_alloc(MAC_VERSION
);
2527 cmn_err(CE_NOTE
, "%s: mac_alloc() failed\n", __func__
);
2531 macp
->m_driver
= qede
;
2532 macp
->m_dip
= qede
->dip
;
2533 macp
->m_instance
= qede
->instance
;
2534 macp
->m_priv_props
= NULL
;
2535 macp
->m_type_ident
= MAC_PLUGIN_IDENT_ETHER
;
2536 macp
->m_src_addr
= qede
->ether_addr
;
2537 macp
->m_callbacks
= &qede_callbacks
;
2538 macp
->m_min_sdu
= 0;
2539 macp
->m_max_sdu
= qede
->mtu
;
2540 macp
->m_margin
= VLAN_TAGSZ
;
2542 macp
->m_v12n
= MAC_VIRT_LEVEL1
;
2545 status
= mac_register(macp
, &qede
->mac_handle
);
2547 cmn_err(CE_NOTE
, "%s: mac_register() failed\n", __func__
);
2557 boolean_t
qede_gld_fini(qede_t
* qede
)
2563 void qede_link_update(qede_t
* qede
,
2566 mac_link_update(qede
->mac_handle
, state
);