/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1, (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "qede.h"
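/*
 * Convenience wrappers for taking and dropping a fastpath's fp_lock.
 * Callers pass a qede_fastpath_t pointer.
 */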
#define	FP_LOCK(ptr)	\
mutex_enter(&ptr->fp_lock);
#define	FP_UNLOCK(ptr)	\
mutex_exit(&ptr->fp_lock);

int
qede_ucst_find(qede_t *qede, const uint8_t *mac_addr)
{
	int slot;

	for (slot = 0; slot < qede->ucst_total; slot++) {
		if (bcmp(qede->ucst_mac[slot].mac_addr.ether_addr_octet,
		    mac_addr, ETHERADDRL) == 0) {
			return (slot);
		}
	}
	return (-1);
}

static int
qede_set_mac_addr(qede_t *qede, uint8_t *mac_addr, uint8_t fl)
{
	struct ecore_filter_ucast params;

	memset(&params, 0, sizeof (params));

	params.opcode = fl;
	params.type = ECORE_FILTER_MAC;
	params.is_rx_filter = true;
	params.is_tx_filter = true;
	COPY_ETH_ADDRESS(mac_addr, params.mac);

	return (ecore_filter_ucast_cmd(&qede->edev,
	    &params, ECORE_SPQ_MODE_EBLOCK, NULL));
}

static int
qede_add_macaddr(qede_t *qede, uint8_t *mac_addr)
{
	int i, ret = 0;

	i = qede_ucst_find(qede, mac_addr);
	if (i != -1) {
		/* LINTED E_ARGUMENT_MISMATCH */
		qede_info(qede, "mac addr already added %d\n",
		    qede->ucst_avail);
		return (0);
	}
	if (qede->ucst_avail == 0) {
		qede_info(qede, "add macaddr ignored \n");
		return (ENOSPC);
	}
	for (i = 0; i < qede->ucst_total; i++) {
		if (qede->ucst_mac[i].set == 0) {
			break;
		}
	}
	if (i >= qede->ucst_total) {
		qede_info(qede, "add macaddr ignored no space");
		return (ENOSPC);
	}
	ret = qede_set_mac_addr(qede, (uint8_t *)mac_addr, ECORE_FILTER_ADD);
	if (ret == 0) {
		bcopy(mac_addr,
		    qede->ucst_mac[i].mac_addr.ether_addr_octet,
		    ETHERADDRL);
		qede->ucst_mac[i].set = 1;
		qede->ucst_avail--;
		/* LINTED E_ARGUMENT_MISMATCH */
		qede_info(qede, " add macaddr passed for addr "
		    "%02x:%02x:%02x:%02x:%02x:%02x",
		    mac_addr[0], mac_addr[1],
		    mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);
	} else {
		/* LINTED E_ARGUMENT_MISMATCH */
		qede_info(qede, "add macaddr failed for addr "
		    "%02x:%02x:%02x:%02x:%02x:%02x",
		    mac_addr[0], mac_addr[1],
		    mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);
	}
	if (qede->ucst_avail == (qede->ucst_total - 1)) {
		u8 bcast_addr[] = {
			0xff, 0xff, 0xff, 0xff, 0xff,
			0xff
		};
		for (i = 0; i < qede->ucst_total; i++) {
			if (qede->ucst_mac[i].set == 0)
				break;
		}
		ret = qede_set_mac_addr(qede,
		    (uint8_t *)bcast_addr, ECORE_FILTER_ADD);
		if (ret == 0) {
			bcopy(bcast_addr,
			    qede->ucst_mac[i].mac_addr.ether_addr_octet,
			    ETHERADDRL);
			qede->ucst_mac[i].set = 1;
			qede->ucst_avail--;
		} else {
			/* LINTED E_ARGUMENT_MISMATCH */
			qede_info(qede, "add macaddr failed for addr "
			    "%02x:%02x:%02x:%02x:%02x:%02x",
			    mac_addr[0], mac_addr[1],
			    mac_addr[2], mac_addr[3], mac_addr[4],
			    mac_addr[5]);
		}
	}
	return (ret);
}

#ifndef ILLUMOS
static int
qede_add_mac_addr(void *arg, const uint8_t *mac_addr, const uint64_t flags)
#else
static int
qede_add_mac_addr(void *arg, const uint8_t *mac_addr)
#endif
{
	qede_mac_group_t *rx_group = (qede_mac_group_t *)arg;
	qede_t *qede = rx_group->qede;
	int ret = DDI_SUCCESS;

	/* LINTED E_ARGUMENT_MISMATCH */
	qede_info(qede, " mac addr :" MAC_STRING, MACTOSTR(mac_addr));

	mutex_enter(&qede->gld_lock);
	if (qede->qede_state == QEDE_STATE_SUSPENDED) {
		mutex_exit(&qede->gld_lock);
		return (ECANCELED);
	}
	ret = qede_add_macaddr(qede, (uint8_t *)mac_addr);

	mutex_exit(&qede->gld_lock);

	return (ret);
}

static int
qede_rem_macaddr(qede_t *qede, uint8_t *mac_addr)
{
	int ret = 0;
	int i;

	i = qede_ucst_find(qede, mac_addr);
	if (i == -1) {
		/* LINTED E_ARGUMENT_MISMATCH */
		qede_info(qede,
		    "mac addr not there to remove",
		    MAC_STRING, MACTOSTR(mac_addr));
		return (0);
	}
	if (qede->ucst_mac[i].set == 0) {
		return (EINVAL);
	}
	ret = qede_set_mac_addr(qede, (uint8_t *)mac_addr, ECORE_FILTER_REMOVE);
	if (ret == 0) {
		bzero(qede->ucst_mac[i].mac_addr.ether_addr_octet, ETHERADDRL);
		qede->ucst_mac[i].set = 0;
		qede->ucst_avail++;
	} else {
		/* LINTED E_ARGUMENT_MISMATCH */
		qede_info(qede, "mac addr remove failed",
		    MAC_STRING, MACTOSTR(mac_addr));
	}
	return (ret);
}

static int
qede_rem_mac_addr(void *arg, const uint8_t *mac_addr)
{
	qede_mac_group_t *rx_group = (qede_mac_group_t *)arg;
	qede_t *qede = rx_group->qede;
	int ret = DDI_SUCCESS;

	/* LINTED E_ARGUMENT_MISMATCH */
	qede_info(qede, "mac addr remove:" MAC_STRING, MACTOSTR(mac_addr));
	mutex_enter(&qede->gld_lock);
	if (qede->qede_state == QEDE_STATE_SUSPENDED) {
		mutex_exit(&qede->gld_lock);
		return (ECANCELED);
	}
	ret = qede_rem_macaddr(qede, (uint8_t *)mac_addr);
	mutex_exit(&qede->gld_lock);
	return (ret);
}

static int
qede_tx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
{
	int ret = 0;

	qede_fastpath_t *fp = (qede_fastpath_t *)rh;
	qede_tx_ring_t *tx_ring = fp->tx_ring[0];
	qede_t *qede = fp->qede;

	if (qede->qede_state == QEDE_STATE_SUSPENDED)
		return (ECANCELED);

	switch (stat) {
	case MAC_STAT_OBYTES:
		*val = tx_ring->tx_byte_count;
		break;

	case MAC_STAT_OPACKETS:
		*val = tx_ring->tx_pkt_count;
		break;

	default:
		*val = 0;
		ret = ENOTSUP;
	}

	return (ret);
}

#ifndef ILLUMOS
static mblk_t *
qede_rx_ring_poll(void *arg, int poll_bytes, int poll_pkts)
{
#else
static mblk_t *
qede_rx_ring_poll(void *arg, int poll_bytes)
{
	/* XXX pick a value at the moment */
	int poll_pkts = 100;
#endif
	qede_fastpath_t *fp = (qede_fastpath_t *)arg;
	mblk_t *mp = NULL;
	int work_done = 0;
	qede_t *qede = fp->qede;

	if (poll_bytes == 0) {
		return (NULL);
	}

	mutex_enter(&fp->fp_lock);
	qede->intrSbPollCnt[fp->vect_info->vect_index]++;

	mp = qede_process_fastpath(fp, poll_bytes, poll_pkts, &work_done);
	if (mp != NULL) {
		fp->rx_ring->rx_poll_cnt++;
	} else if ((mp == NULL) && (work_done == 0)) {
		qede->intrSbPollNoChangeCnt[fp->vect_info->vect_index]++;
	}

	mutex_exit(&fp->fp_lock);
	return (mp);
}

#ifndef ILLUMOS
static int
qede_rx_ring_intr_enable(mac_ring_driver_t rh)
#else
static int
qede_rx_ring_intr_enable(mac_intr_handle_t rh)
#endif
{
	qede_fastpath_t *fp = (qede_fastpath_t *)rh;

	mutex_enter(&fp->qede->drv_lock);
	if (!fp->sb_phys && (fp->sb_dma_handle == NULL)) {
		mutex_exit(&fp->qede->drv_lock);
		return (DDI_FAILURE);
	}

	fp->rx_ring->intrEnableCnt++;
	qede_enable_hw_intr(fp);
	fp->disabled_by_poll = 0;
	mutex_exit(&fp->qede->drv_lock);

	return (DDI_SUCCESS);
}

#ifndef ILLUMOS
static int
qede_rx_ring_intr_disable(mac_ring_driver_t rh)
#else
static int
qede_rx_ring_intr_disable(mac_intr_handle_t rh)
#endif
{
	qede_fastpath_t *fp = (qede_fastpath_t *)rh;

	mutex_enter(&fp->qede->drv_lock);
	if (!fp->sb_phys && (fp->sb_dma_handle == NULL)) {
		mutex_exit(&fp->qede->drv_lock);
		return (DDI_FAILURE);
	}
	fp->rx_ring->intrDisableCnt++;
	qede_disable_hw_intr(fp);
	fp->disabled_by_poll = 1;
	mutex_exit(&fp->qede->drv_lock);
	return (DDI_SUCCESS);
}

static int
qede_rx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
{
	int ret = 0;

	qede_fastpath_t *fp = (qede_fastpath_t *)rh;
	qede_t *qede = fp->qede;
	qede_rx_ring_t *rx_ring = fp->rx_ring;

	if (qede->qede_state == QEDE_STATE_SUSPENDED) {
		return (ECANCELED);
	}

	switch (stat) {
	case MAC_STAT_RBYTES:
		*val = rx_ring->rx_byte_cnt;
		break;
	case MAC_STAT_IPACKETS:
		*val = rx_ring->rx_pkt_cnt;
		break;
	default:
		*val = 0;
		ret = ENOTSUP;
		break;
	}

	return (ret);
}

static int
qede_get_global_ring_index(qede_t *qede, int gindex, int rindex)
{
	qede_fastpath_t *fp;
	qede_rx_ring_t *rx_ring;
	int i = 0;

	for (i = 0; i < qede->num_fp; i++) {
		fp = &qede->fp_array[i];
		rx_ring = fp->rx_ring;

		if (rx_ring->group_index == gindex) {
			rindex--;
		}
		if (rindex < 0) {
			return (i);
		}
	}

	return (-1);
}

static void
qede_rx_ring_stop(mac_ring_driver_t rh)
{
	qede_fastpath_t *fp = (qede_fastpath_t *)rh;
	qede_rx_ring_t *rx_ring = fp->rx_ring;

	qede_print("!%s(%d): called", __func__, fp->qede->instance);
	mutex_enter(&fp->fp_lock);
	rx_ring->mac_ring_started = B_FALSE;
	mutex_exit(&fp->fp_lock);
}

static int
qede_rx_ring_start(mac_ring_driver_t rh, u64 mr_gen_num)
{
	qede_fastpath_t *fp = (qede_fastpath_t *)rh;
	qede_rx_ring_t *rx_ring = fp->rx_ring;

	qede_print("!%s(%d): called", __func__, fp->qede->instance);
	mutex_enter(&fp->fp_lock);
	rx_ring->mr_gen_num = mr_gen_num;
	rx_ring->mac_ring_started = B_TRUE;
	rx_ring->intrDisableCnt = 0;
	rx_ring->intrEnableCnt = 0;
	fp->disabled_by_poll = 0;

	mutex_exit(&fp->fp_lock);

	return (DDI_SUCCESS);
}

/* Callback function from mac layer to register rings */
void
qede_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
    const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	qede_t *qede = (qede_t *)arg;
	mac_intr_t *mintr = &infop->mri_intr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		/*
		 * Index passed as a param is the ring index within the
		 * given group index. If multiple groups are supported
		 * then need to search into all groups to find out the
		 * global ring index for the passed group relative
		 * ring index
		 */
		int global_ring_index = qede_get_global_ring_index(qede,
		    group_index, ring_index);
		qede_fastpath_t *fp;
		qede_rx_ring_t *rx_ring;
		int i;

		/*
		 * global_ring_index < 0 means the group index passed
		 * was not registered by our driver
		 */
		ASSERT(global_ring_index >= 0);

		if (rh == NULL) {
			cmn_err(CE_WARN, "!rx ring(%d) ring handle NULL",
			    global_ring_index);
		}

		fp = &qede->fp_array[global_ring_index];
		rx_ring = fp->rx_ring;
		fp->qede = qede;

		rx_ring->mac_ring_handle = rh;

		qede_info(qede, "rx_ring %d mac_ring_handle %p",
		    rx_ring->rss_id, rh);

		/* mri_driver passed as arg to mac_ring* callbacks */
		infop->mri_driver = (mac_ring_driver_t)fp;
		/*
		 * mri_start callback will supply a mac rings generation
		 * number which is needed while indicating packets
		 * upstream via mac_ring_rx() call
		 */
		infop->mri_start = qede_rx_ring_start;
		infop->mri_stop = qede_rx_ring_stop;
		infop->mri_poll = qede_rx_ring_poll;
		infop->mri_stat = qede_rx_ring_stat;

		mintr->mi_handle = (mac_intr_handle_t)fp;
		mintr->mi_enable = qede_rx_ring_intr_enable;
		mintr->mi_disable = qede_rx_ring_intr_disable;
		if (qede->intr_ctx.intr_type_in_use &
		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    qede->intr_ctx.
			    intr_hdl_array[global_ring_index + qede->num_hwfns];
		}
		break;
	}
	case MAC_RING_TYPE_TX: {
		qede_fastpath_t *fp;
		qede_tx_ring_t *tx_ring;
		int i, tc;

		ASSERT(ring_index < qede->num_fp);

		fp = &qede->fp_array[ring_index];
		fp->qede = qede;
		tx_ring = fp->tx_ring[0];
		tx_ring->mac_ring_handle = rh;
		qede_info(qede, "tx_ring %d mac_ring_handle %p",
		    tx_ring->tx_queue_index, rh);
		infop->mri_driver = (mac_ring_driver_t)fp;
		infop->mri_start = NULL;
		infop->mri_stop = NULL;
		infop->mri_tx = qede_ring_tx;
		infop->mri_stat = qede_tx_ring_stat;
		if (qede->intr_ctx.intr_type_in_use &
		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    qede->intr_ctx.
			    intr_hdl_array[ring_index + qede->num_hwfns];
		}
		break;
	}
	default:
		break;
	}
}

/*
 * Callback function from mac layer to register group
 */
void
qede_fill_group(void *arg, mac_ring_type_t rtype, const int index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	qede_t *qede = (qede_t *)arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		qede_mac_group_t *rx_group;

		rx_group = &qede->rx_groups[index];
		rx_group->group_handle = gh;
		rx_group->group_index = index;
		rx_group->qede = qede;
		infop->mgi_driver = (mac_group_driver_t)rx_group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
#ifndef ILLUMOS
		infop->mgi_addvlan = NULL;
		infop->mgi_remvlan = NULL;
		infop->mgi_getsriov_info = NULL;
		infop->mgi_setmtu = NULL;
#endif
		infop->mgi_addmac = qede_add_mac_addr;
		infop->mgi_remmac = qede_rem_mac_addr;
		infop->mgi_count = qede->num_fp;
#ifndef ILLUMOS
		if (index == 0) {
			infop->mgi_flags = MAC_GROUP_DEFAULT;
		}
#endif

		break;
	}
	case MAC_RING_TYPE_TX: {
		qede_mac_group_t *tx_group;

		tx_group = &qede->tx_groups[index];
		tx_group->group_handle = gh;
		tx_group->group_index = index;
		tx_group->qede = qede;

		infop->mgi_driver = (mac_group_driver_t)tx_group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = NULL;
		infop->mgi_remmac = NULL;
#ifndef ILLUMOS
		infop->mgi_addvlan = NULL;
		infop->mgi_remvlan = NULL;
		infop->mgi_setmtu = NULL;
		infop->mgi_getsriov_info = NULL;
#endif

		infop->mgi_count = qede->num_fp;

#ifndef ILLUMOS
		if (index == 0) {
			infop->mgi_flags = MAC_GROUP_DEFAULT;
		}
#endif
		break;
	}
	default:
		break;
	}
}

#ifdef ILLUMOS
static int
qede_transceiver_info(void *arg, uint_t id, mac_transceiver_info_t *infop)
{
	qede_t *qede = arg;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *hwfn;
	struct ecore_ptt *ptt;
	uint32_t transceiver_state;

	if (id >= edev->num_hwfns || arg == NULL || infop == NULL)
		return (EINVAL);

	hwfn = &edev->hwfns[id];
	ptt = ecore_ptt_acquire(hwfn);
	if (ptt == NULL) {
		return (EIO);
	}
	/*
	 * Use the underlying raw API to get this information. While the
	 * ecore_phy routines have some ways of getting to this information, it
	 * ends up writing the raw data as ASCII characters which doesn't help
	 * us one bit.
	 */
	transceiver_state = ecore_rd(hwfn, ptt, hwfn->mcp_info->port_addr +
	    OFFSETOF(struct public_port, transceiver_data));
	transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
	ecore_ptt_release(hwfn, ptt);

	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) != 0) {
		mac_transceiver_info_set_present(infop, B_TRUE);
		/*
		 * Based on our testing, the ETH_TRANSCEIVER_STATE_VALID flag is
		 * not set, so we cannot rely on it. Instead, we have found that
		 * the ETH_TRANSCEIVER_STATE_UPDATING will be set when we cannot
		 * use the transceiver.
		 */
		if ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) != 0) {
			mac_transceiver_info_set_usable(infop, B_FALSE);
		} else {
			mac_transceiver_info_set_usable(infop, B_TRUE);
		}
	} else {
		mac_transceiver_info_set_present(infop, B_FALSE);
		mac_transceiver_info_set_usable(infop, B_FALSE);
	}

	return (0);
}

static int
qede_transceiver_read(void *arg, uint_t id, uint_t page, void *buf,
    size_t nbytes, off_t offset, size_t *nread)
{
	qede_t *qede = arg;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *hwfn;
	uint32_t port, lane;
	struct ecore_ptt *ptt;
	enum _ecore_status_t ret;

	if (id >= edev->num_hwfns || buf == NULL || nbytes == 0 ||
	    nread == NULL || (page != 0xa0 && page != 0xa2) || offset < 0)
		return (EINVAL);

	/*
	 * Both supported pages have a length of 256 bytes, ensure nothing asks
	 * us to go beyond that.
	 */
	if (nbytes > 256 || offset >= 256 || (offset + nbytes > 256)) {
		return (EINVAL);
	}

	hwfn = &edev->hwfns[id];
	ptt = ecore_ptt_acquire(hwfn);
	if (ptt == NULL) {
		return (EIO);
	}

	ret = ecore_mcp_phy_sfp_read(hwfn, ptt, hwfn->port_id, page, offset,
	    nbytes, buf);
	ecore_ptt_release(hwfn, ptt);
	if (ret != ECORE_SUCCESS) {
		return (EIO);
	}
	*nread = nbytes;
	return (0);
}
#endif /* ILLUMOS */

static int
qede_mac_stats(void *arg, uint_t stat, uint64_t *value)
{
	qede_t *qede = (qede_t *)arg;
	struct ecore_eth_stats vstats;
	struct ecore_dev *edev = &qede->edev;
	struct qede_link_cfg lnkcfg;
	int rc = 0;
	qede_fastpath_t *fp = &qede->fp_array[0];
	qede_rx_ring_t *rx_ring;
	qede_tx_ring_t *tx_ring;

	if ((qede == NULL) || (value == NULL)) {
		return (EINVAL);
	}

	mutex_enter(&qede->gld_lock);

	if (qede->qede_state != QEDE_STATE_STARTED) {
		mutex_exit(&qede->gld_lock);
		return (EAGAIN);
	}

	*value = 0;

	memset(&vstats, 0, sizeof (struct ecore_eth_stats));
	ecore_get_vport_stats(edev, &vstats);

	memset(&qede->curcfg, 0, sizeof (struct qede_link_cfg));
	qede_get_link_info(&edev->hwfns[0], &qede->curcfg);

	switch (stat)
	{
	case MAC_STAT_IFSPEED:
		*value = (qede->props.link_speed * 1000000ULL);
		break;
	case MAC_STAT_MULTIRCV:
		*value = vstats.common.rx_mcast_pkts;
		break;
	case MAC_STAT_BRDCSTRCV:
		*value = vstats.common.rx_bcast_pkts;
		break;
	case MAC_STAT_MULTIXMT:
		*value = vstats.common.tx_mcast_pkts;
		break;
	case MAC_STAT_BRDCSTXMT:
		*value = vstats.common.tx_bcast_pkts;
		break;
	case MAC_STAT_NORCVBUF:
		*value = vstats.common.no_buff_discards;
		break;
	case MAC_STAT_NOXMTBUF:
		*value = 0;
		break;
	case MAC_STAT_IERRORS:
	case ETHER_STAT_MACRCV_ERRORS:
		*value = vstats.common.mac_filter_discards +
		    vstats.common.packet_too_big_discard +
		    vstats.common.rx_crc_errors;
		break;

	case MAC_STAT_OERRORS:
		break;

	case MAC_STAT_COLLISIONS:
		*value = vstats.bb.tx_total_collisions;
		break;

	case MAC_STAT_RBYTES:
		*value = vstats.common.rx_ucast_bytes +
		    vstats.common.rx_mcast_bytes +
		    vstats.common.rx_bcast_bytes;
		break;

	case MAC_STAT_IPACKETS:
		*value = vstats.common.rx_ucast_pkts +
		    vstats.common.rx_mcast_pkts +
		    vstats.common.rx_bcast_pkts;
		break;

	case MAC_STAT_OBYTES:
		*value = vstats.common.tx_ucast_bytes +
		    vstats.common.tx_mcast_bytes +
		    vstats.common.tx_bcast_bytes;
		break;

	case MAC_STAT_OPACKETS:
		*value = vstats.common.tx_ucast_pkts +
		    vstats.common.tx_mcast_pkts +
		    vstats.common.tx_bcast_pkts;
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		*value = vstats.common.rx_align_errors;
		break;

	case ETHER_STAT_FCS_ERRORS:
		*value = vstats.common.rx_crc_errors;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		break;

	case ETHER_STAT_DEFER_XMTS:
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		break;

	case ETHER_STAT_EX_COLLISIONS:
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		*value = 0;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		*value = vstats.common.rx_oversize_packets;
		break;

#if (MAC_VERSION > 1)
	case ETHER_STAT_TOOSHORT_ERRORS:
		*value = vstats.common.rx_undersize_packets;
		break;
#endif

	case ETHER_STAT_XCVR_ADDR:
		*value = 0;
		break;

	case ETHER_STAT_XCVR_ID:
		*value = 0;
		break;

	case ETHER_STAT_XCVR_INUSE:
		switch (qede->props.link_speed) {
		default:
			*value = XCVR_UNDEFINED;
		}
		break;
#if (MAC_VERSION > 1)
	case ETHER_STAT_CAP_10GFDX:
		*value = 0;
		break;
#endif
	case ETHER_STAT_CAP_100FDX:
		*value = 0;
		break;
	case ETHER_STAT_CAP_100HDX:
		*value = 0;
		break;
	case ETHER_STAT_CAP_ASMPAUSE:
		*value = 1;
		break;
	case ETHER_STAT_CAP_PAUSE:
		*value = 1;
		break;
	case ETHER_STAT_CAP_AUTONEG:
		*value = 1;
		break;

#if (MAC_VERSION > 1)
	case ETHER_STAT_CAP_REMFAULT:
		*value = 0;
		break;
#endif

#if (MAC_VERSION > 1)
	case ETHER_STAT_ADV_CAP_10GFDX:
		*value = 0;
		break;
#endif
	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*value = 1;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*value = 1;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*value = qede->curcfg.adv_capab.autoneg;
		break;

#if (MAC_VERSION > 1)
	case ETHER_STAT_ADV_REMFAULT:
		*value = 0;
		break;
#endif

	case ETHER_STAT_LINK_AUTONEG:
		*value = qede->curcfg.autoneg;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*value = (qede->props.link_duplex == DUPLEX_FULL) ?
		    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
		break;
	/*
	 * Supported speeds. These indicate what hardware is capable of.
	 */
	case ETHER_STAT_CAP_1000HDX:
		*value = qede->curcfg.supp_capab.param_1000hdx;
		break;

	case ETHER_STAT_CAP_1000FDX:
		*value = qede->curcfg.supp_capab.param_1000fdx;
		break;

	case ETHER_STAT_CAP_10GFDX:
		*value = qede->curcfg.supp_capab.param_10000fdx;
		break;

	case ETHER_STAT_CAP_25GFDX:
		*value = qede->curcfg.supp_capab.param_25000fdx;
		break;

	case ETHER_STAT_CAP_40GFDX:
		*value = qede->curcfg.supp_capab.param_40000fdx;
		break;

	case ETHER_STAT_CAP_50GFDX:
		*value = qede->curcfg.supp_capab.param_50000fdx;
		break;

	case ETHER_STAT_CAP_100GFDX:
		*value = qede->curcfg.supp_capab.param_100000fdx;
		break;

	/*
	 * Advertised speeds. These indicate what the hardware is currently
	 * advertising.
	 */
	case ETHER_STAT_ADV_CAP_1000HDX:
		*value = qede->curcfg.adv_capab.param_1000hdx;
		break;

	case ETHER_STAT_ADV_CAP_1000FDX:
		*value = qede->curcfg.adv_capab.param_1000fdx;
		break;

	case ETHER_STAT_ADV_CAP_10GFDX:
		*value = qede->curcfg.adv_capab.param_10000fdx;
		break;

	case ETHER_STAT_ADV_CAP_25GFDX:
		*value = qede->curcfg.adv_capab.param_25000fdx;
		break;

	case ETHER_STAT_ADV_CAP_40GFDX:
		*value = qede->curcfg.adv_capab.param_40000fdx;
		break;

	case ETHER_STAT_ADV_CAP_50GFDX:
		*value = qede->curcfg.adv_capab.param_50000fdx;
		break;

	case ETHER_STAT_ADV_CAP_100GFDX:
		*value = qede->curcfg.adv_capab.param_100000fdx;
		break;

	default:
		rc = ENOTSUP;
	}

	mutex_exit(&qede->gld_lock);
	return (rc);
}

/* (flag) TRUE = on, FALSE = off */
static int
qede_mac_promiscuous(void *arg, boolean_t on)
{
	qede_t *qede = (qede_t *)arg;
	int ret = DDI_SUCCESS;
	enum qede_filter_rx_mode_type mode;

	qede_print("!%s(%d): called", __func__, qede->instance);

	mutex_enter(&qede->drv_lock);

	if (qede->qede_state == QEDE_STATE_SUSPENDED) {
		ret = ECANCELED;
		goto exit;
	}

	if (on) {
		qede_info(qede, "Entering promiscuous mode");
		mode = QEDE_FILTER_RX_MODE_PROMISC;
		qede->params.promisc_fl = B_TRUE;
	} else {
		qede_info(qede, "Leaving promiscuous mode");
		if (qede->params.multi_promisc_fl == B_TRUE) {
			mode = QEDE_FILTER_RX_MODE_MULTI_PROMISC;
		} else {
			mode = QEDE_FILTER_RX_MODE_REGULAR;
		}
		qede->params.promisc_fl = B_FALSE;
	}

	ret = qede_set_filter_rx_mode(qede, mode);

exit:
	mutex_exit(&qede->drv_lock);
	return (ret);
}

int
qede_set_rx_mac_mcast(qede_t *qede, enum ecore_filter_opcode opcode,
    uint8_t *mac, int mc_cnt)
{
	struct ecore_filter_mcast cmd;
	int i;

	memset(&cmd, 0, sizeof (cmd));
	cmd.opcode = opcode;
	cmd.num_mc_addrs = mc_cnt;

	for (i = 0; i < mc_cnt; i++, mac += ETH_ALLEN) {
		COPY_ETH_ADDRESS(mac, cmd.mac[i]);
	}

	return (ecore_filter_mcast_cmd(&qede->edev, &cmd,
	    ECORE_SPQ_MODE_CB, NULL));
}

int
qede_set_filter_rx_mode(qede_t *qede, enum qede_filter_rx_mode_type type)
{
	struct ecore_filter_accept_flags flg;

	memset(&flg, 0, sizeof (flg));

	flg.update_rx_mode_config = 1;
	flg.update_tx_mode_config = 1;
	flg.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
	    ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
	flg.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
	    ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;

	if (type == QEDE_FILTER_RX_MODE_PROMISC)
		flg.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
		    ECORE_ACCEPT_MCAST_UNMATCHED;
	else if (type == QEDE_FILTER_RX_MODE_MULTI_PROMISC)
		flg.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	qede_info(qede, "rx_mode rx_filter=0x%x tx_filter=0x%x type=0x%x\n",
	    flg.rx_accept_filter, flg.tx_accept_filter, type);
	return (ecore_filter_accept_cmd(&qede->edev, 0, flg,
	    0, /* update_accept_any_vlan */
	    0, /* accept_any_vlan */
	    ECORE_SPQ_MODE_CB, NULL));
}

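/*
 * Add or remove one multicast address (flag TRUE = add), or flush the
 * entire list when called with flag FALSE and a NULL address. The
 * software list in qede->mclist is rebuilt into a flat array and
 * re-programmed on every change; once the count crosses the soft
 * limit, the device is put into multicast-promiscuous mode instead.
 */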
int
qede_multicast(qede_t *qede, boolean_t flag, const uint8_t *ptr_mcaddr)
{
	int i, ret = DDI_SUCCESS;
	qede_mcast_list_entry_t *ptr_mlist;
	qede_mcast_list_entry_t *ptr_entry;
	int mc_cnt;
	unsigned char *mc_macs, *tmpmc;
	size_t size;
	boolean_t mcmac_exists = B_FALSE;
	enum qede_filter_rx_mode_type mode;

	if (!ptr_mcaddr) {
		cmn_err(CE_NOTE, "Removing all multicast");
	} else {
		cmn_err(CE_NOTE,
		    "qede=%p %s multicast: %02x:%02x:%02x:%02x:%02x:%02x",
		    qede, (flag) ? "Adding" : "Removing", ptr_mcaddr[0],
		    ptr_mcaddr[1], ptr_mcaddr[2], ptr_mcaddr[3], ptr_mcaddr[4],
		    ptr_mcaddr[5]);
	}

	if (flag && (ptr_mcaddr == NULL)) {
		cmn_err(CE_WARN, "ERROR: Multicast address not specified");
		return (EINVAL);
	}

	/* Adding this mcaddr would exceed the soft limit */
	if (flag && (qede->mc_cnt >= MAX_MC_SOFT_LIMIT)) {
		qede_info(qede, "Cannot add more than MAX_MC_SOFT_LIMIT");
		return (ENOENT);
	}

	size = MAX_MC_SOFT_LIMIT * ETH_ALLEN;

	mc_macs = kmem_zalloc(size, KM_NOSLEEP);
	if (!mc_macs) {
		cmn_err(CE_WARN, "ERROR: Failed to allocate for mc_macs");
		return (EINVAL);
	}

	tmpmc = mc_macs;

	/* Remove all multicast, as flag is not set and mcaddr not specified */
	if (!flag && (ptr_mcaddr == NULL)) {
		QEDE_LIST_FOR_EACH_ENTRY(ptr_entry,
		    &qede->mclist.head, qede_mcast_list_entry_t, mclist_entry)
		{
			if (ptr_entry != NULL) {
				QEDE_LIST_REMOVE(&ptr_entry->mclist_entry,
				    &qede->mclist.head);
				kmem_free(ptr_entry,
				    sizeof (qede_mcast_list_entry_t) +
				    ETH_ALLEN);
			}
		}

		ret = qede_set_rx_mac_mcast(qede,
		    ECORE_FILTER_REMOVE, mc_macs, 1);
		qede->mc_cnt = 0;
		goto exit;
	}

	QEDE_LIST_FOR_EACH_ENTRY(ptr_entry,
	    &qede->mclist.head, qede_mcast_list_entry_t, mclist_entry)
	{
		if ((ptr_entry != NULL) &&
		    IS_ETH_ADDRESS_EQUAL(ptr_mcaddr, ptr_entry->mac)) {
			mcmac_exists = B_TRUE;
			break;
		}
	}
	if (flag && mcmac_exists) {
		ret = DDI_SUCCESS;
		goto exit;
	} else if (!flag && !mcmac_exists) {
		ret = DDI_SUCCESS;
		goto exit;
	}

	if (flag) {
		ptr_entry = kmem_zalloc((sizeof (qede_mcast_list_entry_t) +
		    ETH_ALLEN), KM_NOSLEEP);
		ptr_entry->mac = (uint8_t *)ptr_entry +
		    sizeof (qede_mcast_list_entry_t);
		COPY_ETH_ADDRESS(ptr_mcaddr, ptr_entry->mac);
		QEDE_LIST_ADD(&ptr_entry->mclist_entry, &qede->mclist.head);
	} else {
		QEDE_LIST_REMOVE(&ptr_entry->mclist_entry, &qede->mclist.head);
		kmem_free(ptr_entry, sizeof (qede_mcast_list_entry_t) +
		    ETH_ALLEN);
	}

	mc_cnt = 0;
	QEDE_LIST_FOR_EACH_ENTRY(ptr_entry, &qede->mclist.head,
	    qede_mcast_list_entry_t, mclist_entry) {
		COPY_ETH_ADDRESS(ptr_entry->mac, tmpmc);
		tmpmc += ETH_ALLEN;
		mc_cnt++;
	}
	qede->mc_cnt = mc_cnt;
	if (mc_cnt <= 64) {
		ret = qede_set_rx_mac_mcast(qede, ECORE_FILTER_ADD,
		    (unsigned char *)mc_macs, mc_cnt);
		if ((qede->params.multi_promisc_fl == B_TRUE) &&
		    (qede->params.promisc_fl == B_FALSE)) {
			mode = QEDE_FILTER_RX_MODE_REGULAR;
			ret = qede_set_filter_rx_mode(qede, mode);
		}
		qede->params.multi_promisc_fl = B_FALSE;
	} else {
		if ((qede->params.multi_promisc_fl == B_FALSE) &&
		    (qede->params.promisc_fl == B_FALSE)) {
			ret = qede_set_filter_rx_mode(qede,
			    QEDE_FILTER_RX_MODE_MULTI_PROMISC);
		}
		qede->params.multi_promisc_fl = B_TRUE;
		qede_info(qede, "mode is MULTI_PROMISC");
	}
exit:
	kmem_free(mc_macs, size);
	qede_info(qede, "multicast ret %d mc_cnt %d\n", ret, qede->mc_cnt);
	return (ret);
}

/*
 * This function is used to enable or disable multicast packet reception for
 * particular multicast addresses.
 * (flag) TRUE = add, FALSE = remove
 */
static int
qede_mac_multicast(void *arg, boolean_t flag, const uint8_t *mcast_addr)
{
	qede_t *qede = (qede_t *)arg;
	int ret = DDI_SUCCESS;

	mutex_enter(&qede->gld_lock);
	if (qede->qede_state != QEDE_STATE_STARTED) {
		mutex_exit(&qede->gld_lock);
		return (EAGAIN);
	}
	ret = qede_multicast(qede, flag, mcast_addr);

	mutex_exit(&qede->gld_lock);

	return (ret);
}

int
qede_clear_filters(qede_t *qede)
{
	int ret = 0;
	int i;

	if ((qede->params.promisc_fl == B_TRUE) ||
	    (qede->params.multi_promisc_fl == B_TRUE)) {
		ret = qede_set_filter_rx_mode(qede,
		    QEDE_FILTER_RX_MODE_REGULAR);
		if (ret) {
			qede_info(qede,
			    "qede_clear_filters failed to set rx_mode");
		}
	}
	for (i = 0; i < qede->ucst_total; i++) {
		if (qede->ucst_mac[i].set) {
			qede_rem_macaddr(qede,
			    qede->ucst_mac[i].mac_addr.ether_addr_octet);
		}
	}
	qede_multicast(qede, B_FALSE, NULL);
	return (ret);
}

#ifdef NO_CROSSBOW
static int
qede_mac_unicast(void *arg, const uint8_t *mac_addr)
{
	qede_t *qede = (qede_t *)arg;
	return (0);
}

static mblk_t *
qede_mac_tx(void *arg, mblk_t *mblk)
{
	qede_t *qede = (qede_t *)arg;
	qede_fastpath_t *fp = &qede->fp_array[0];

	mblk = qede_ring_tx((void *)fp, mblk);

	return (mblk);
}
#endif /* NO_CROSSBOW */

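/*
 * Loopback modes exported through the LB_GET_INFO/LB_SET_MODE ioctls.
 * External loopback is advertised here but is rejected by
 * qede_set_loopback_mode() below.
 */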
static lb_property_t loopmodes[] = {
	{ normal, "normal", QEDE_LOOP_NONE },
	{ internal, "internal", QEDE_LOOP_INTERNAL },
	{ external, "external", QEDE_LOOP_EXTERNAL },
};

/*
 * Set Loopback mode
 */
static enum ioc_reply
qede_set_loopback_mode(qede_t *qede, uint32_t mode)
{
	int ret, i = 0;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *hwfn;
	struct ecore_ptt *ptt = NULL;
	struct ecore_mcp_link_params *link_params;

	hwfn = &edev->hwfns[0];
	link_params = ecore_mcp_get_link_params(hwfn);
	ptt = ecore_ptt_acquire(hwfn);

	switch (mode) {
	default:
		qede_info(qede, "unknown loopback mode !!");
		ecore_ptt_release(hwfn, ptt);
		return (IOC_INVAL);

	case QEDE_LOOP_NONE:
		ecore_mcp_set_link(hwfn, ptt, 0);

		while (qede->params.link_state && i < 5000) {
			OSAL_MSLEEP(1);
			i++;
		}
		i = 0;

		link_params->loopback_mode = ETH_LOOPBACK_NONE;
		qede->loop_back_mode = QEDE_LOOP_NONE;
		ret = ecore_mcp_set_link(hwfn, ptt, 1);
		ecore_ptt_release(hwfn, ptt);

		while (!qede->params.link_state && i < 5000) {
			OSAL_MSLEEP(1);
			i++;
		}
		return (IOC_REPLY);

	case QEDE_LOOP_INTERNAL:
		qede_print("!%s(%d) : loopback mode (INTERNAL) is set!",
		    __func__, qede->instance);
		ecore_mcp_set_link(hwfn, ptt, 0);

		while (qede->params.link_state && i < 5000) {
			OSAL_MSLEEP(1);
			i++;
		}
		i = 0;
		link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
		qede->loop_back_mode = QEDE_LOOP_INTERNAL;
		ret = ecore_mcp_set_link(hwfn, ptt, 1);
		ecore_ptt_release(hwfn, ptt);

		while (!qede->params.link_state && i < 5000) {
			OSAL_MSLEEP(1);
			i++;
		}
		return (IOC_REPLY);

	case QEDE_LOOP_EXTERNAL:
		qede_print("!%s(%d) : External loopback mode is not supported",
		    __func__, qede->instance);
		ecore_ptt_release(hwfn, ptt);
		return (IOC_INVAL);
	}
}

static int
qede_ioctl_pcicfg_rd(qede_t *qede, u32 addr, void *data,
    int len)
{
	u32 crb, actual_crb;
	uint32_t ret = 0;
	int cap_offset = 0, cap_id = 0, next_cap = 0;
	ddi_acc_handle_t pci_cfg_handle = qede->pci_cfg_handle;
	qede_ioctl_data_t *data1 = (qede_ioctl_data_t *)data;

	cap_offset = pci_config_get8(pci_cfg_handle, PCI_CONF_CAP_PTR);
	while (cap_offset != 0) {
		/* Check for an invalid PCI read. */
		if (cap_offset == PCI_EINVAL8) {
			return (DDI_FAILURE);
		}
		cap_id = pci_config_get8(pci_cfg_handle, cap_offset);
		if (cap_id == PCI_CAP_ID_PCI_E) {
			/* PCIe expr capab struct found */
			break;
		} else {
			next_cap = pci_config_get8(pci_cfg_handle,
			    cap_offset + 1);
			cap_offset = next_cap;
		}
	}

	switch (len) {
	case 1:
		ret = pci_config_get8(qede->pci_cfg_handle, addr);
		(void) memcpy(data, &ret, sizeof (uint8_t));
		break;
	case 2:
		ret = pci_config_get16(qede->pci_cfg_handle, addr);
		(void) memcpy(data, &ret, sizeof (uint16_t));
		break;
	case 4:
		ret = pci_config_get32(qede->pci_cfg_handle, addr);
		(void) memcpy(data, &ret, sizeof (uint32_t));
		break;
	default:
		cmn_err(CE_WARN, "bad length for pci config read\n");
		return (1);
	}
	return (0);
}

static int
qede_ioctl_pcicfg_wr(qede_t *qede, u32 addr, void *data,
    int len)
{
	uint16_t ret = 0;
	int cap_offset = 0, cap_id = 0, next_cap = 0;
	qede_ioctl_data_t *data1 = (qede_ioctl_data_t *)data;
	ddi_acc_handle_t pci_cfg_handle = qede->pci_cfg_handle;

	cap_offset = pci_config_get8(pci_cfg_handle, PCI_CONF_CAP_PTR);
	while (cap_offset != 0) {
		cap_id = pci_config_get8(pci_cfg_handle, cap_offset);
		if (cap_id == PCI_CAP_ID_PCI_E) {
			/* PCIe expr capab struct found */
			break;
		} else {
			next_cap = pci_config_get8(pci_cfg_handle,
			    cap_offset + 1);
			cap_offset = next_cap;
		}
	}

	switch (len) {
	case 1:
		pci_config_put8(qede->pci_cfg_handle, addr,
		    *(uint8_t *)data1->uabc);
		break;
	case 2:
		ret = pci_config_get16(qede->pci_cfg_handle, addr);
		ret = ret | *(uint16_t *)data1->uabc;

		pci_config_put16(qede->pci_cfg_handle, addr,
		    ret);
		break;
	case 4:
		pci_config_put32(qede->pci_cfg_handle, addr,
		    *(uint32_t *)data1->uabc);
		break;

	default:
		return (1);
	}
	return (0);
}

static int
qede_ioctl_rd_wr_reg(qede_t *qede, void *data)
{
	struct ecore_hwfn *p_hwfn;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_ptt *ptt;
	qede_ioctl_data_t *data1 = (qede_ioctl_data_t *)data;
	uint32_t ret = 0;
	uint8_t cmd = (uint8_t)data1->unused1;
	uint32_t addr = data1->off;
	uint32_t val = *(uint32_t *)&data1->uabc[1];
	uint32_t hwfn_index = *(uint32_t *)&data1->uabc[5];
	uint32_t *reg_addr;

	if (hwfn_index >= qede->num_hwfns) {
		cmn_err(CE_WARN, "invalid hwfn index from application\n");
		return (EINVAL);
	}
	p_hwfn = &edev->hwfns[hwfn_index];

	switch (cmd) {
	case QEDE_REG_READ:
		ret = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, addr);
		(void) memcpy(data1->uabc, &ret, sizeof (uint32_t));
		break;

	case QEDE_REG_WRITE:
		ecore_wr(p_hwfn, p_hwfn->p_main_ptt, addr, val);
		break;

	default:
		cmn_err(CE_WARN,
		    "wrong command in register read/write from application\n");
		break;
	}
	return (ret);
}

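/*
 * NVRAM read/write backend for the QEDE_RW_NVRAM ioctl. Payloads
 * larger than one message block arrive as a b_cont chain, so both the
 * read path and the ACCUMULATE_NVM_BUF path walk mp->b_cont to copy
 * the data in pieces; a write is staged into qede->nvm_buf and only
 * pushed to the management firmware by QEDE_NVRAM_CMD_PUT_FILE_DATA.
 */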
static int
qede_ioctl_rd_wr_nvram(qede_t *qede, mblk_t *mp)
{
	qede_nvram_data_t *data1 = (qede_nvram_data_t *)(mp->b_cont->b_rptr);
	qede_nvram_data_t *data2, *next_data;
	struct ecore_dev *edev = &qede->edev;
	uint32_t ret = 0, hdr_size = 24, bytes_to_copy, copy_len = 0;
	uint32_t copy_len1 = 0;
	uint32_t addr = data1->off;
	uint32_t size = data1->size, i, buf_size;
	uint8_t cmd, cmd2;
	uint8_t *buf, *tmp_buf;
	mblk_t *mp1;

	cmd = (uint8_t)data1->unused1;

	switch (cmd) {
	case QEDE_NVRAM_CMD_READ:
		buf = kmem_zalloc(size, GFP_KERNEL);
		if (buf == NULL) {
			cmn_err(CE_WARN, "memory allocation failed"
			    " in nvram read ioctl\n");
			return (DDI_FAILURE);
		}
		ret = ecore_mcp_nvm_read(edev, addr, buf, data1->size);

		copy_len = (MBLKL(mp->b_cont)) - hdr_size;
		if (copy_len > size) {
			(void) memcpy(data1->uabc, buf, size);
			kmem_free(buf, size);
			ret = 0;
			break;
		}
		(void) memcpy(data1->uabc, buf, copy_len);
		bytes_to_copy = size - copy_len;
		tmp_buf = ((uint8_t *)buf) + copy_len;
		copy_len1 = copy_len;
		mp1 = mp->b_cont;
		mp1 = mp1->b_cont;

		while (mp1) {
			copy_len = MBLKL(mp1);
			if (mp1->b_cont == NULL) {
				copy_len = MBLKL(mp1) - 4;
			}
			data2 = (qede_nvram_data_t *)mp1->b_rptr;
			if (copy_len > bytes_to_copy) {
				(void) memcpy(data2->uabc, tmp_buf,
				    bytes_to_copy);
				break;
			}
			(void) memcpy(data2->uabc, tmp_buf, copy_len);
			tmp_buf = tmp_buf + copy_len;
			copy_len += copy_len;
			mp1 = mp1->b_cont;
			bytes_to_copy = bytes_to_copy - copy_len;
		}

		kmem_free(buf, size);
		break;
	case QEDE_NVRAM_CMD_WRITE:
		cmd2 = (uint8_t)data1->cmd2;
		size = data1->size;
		addr = data1->off;
		buf_size = size;
		ret = 0;

		switch (cmd2) {
		case START_NVM_WRITE:
			buf = kmem_zalloc(size, GFP_KERNEL);
			qede->nvm_buf_size = data1->size;
			if (buf == NULL) {
				cmn_err(CE_WARN,
				    "memory allocation failed in "
				    "START_NVM_WRITE\n");
				return (DDI_FAILURE);
			}
			qede->nvm_buf_start = buf;
			cmn_err(CE_NOTE,
			    "buf = %p, size = %x\n", qede->nvm_buf_start, size);
			qede->nvm_buf = buf;
			qede->copy_len = 0;
			ret = 0;
			break;

		case ACCUMULATE_NVM_BUF:
			tmp_buf = qede->nvm_buf;
			copy_len = MBLKL(mp->b_cont) - hdr_size;
			if (copy_len > buf_size) {
				if (buf_size < qede->nvm_buf_size) {
					(void) memcpy(tmp_buf, data1->uabc,
					    buf_size);
					qede->copy_len = qede->copy_len +
					    buf_size;
				} else {
					(void) memcpy(tmp_buf,
					    data1->uabc, qede->nvm_buf_size);
					qede->copy_len =
					    qede->copy_len + qede->nvm_buf_size;
				}
				tmp_buf = tmp_buf + buf_size;
				qede->nvm_buf = tmp_buf;
				cmn_err(CE_NOTE,
				    "buf_size from app = %x\n", copy_len);
				ret = 0;
				break;
			}
			(void) memcpy(tmp_buf, data1->uabc, copy_len);
			tmp_buf = tmp_buf + copy_len;
			bytes_to_copy = buf_size - copy_len;
			mp1 = mp->b_cont;
			mp1 = mp1->b_cont;
			copy_len1 = copy_len;

			while (mp1) {
				copy_len = MBLKL(mp1);
				if (mp1->b_cont == NULL) {
					copy_len = MBLKL(mp1) - 4;
				}
				next_data = (qede_nvram_data_t *)mp1->b_rptr;
				if (copy_len > bytes_to_copy) {
					(void) memcpy(tmp_buf, next_data->uabc,
					    bytes_to_copy);
					qede->copy_len = qede->copy_len +
					    bytes_to_copy;
					ret = 0;
					break;
				}
				(void) memcpy(tmp_buf, next_data->uabc,
				    copy_len);
				qede->copy_len = qede->copy_len + copy_len;
				tmp_buf = tmp_buf + copy_len;
				copy_len = copy_len1 + copy_len;
				bytes_to_copy = bytes_to_copy - copy_len;
				mp1 = mp1->b_cont;
			}
			qede->nvm_buf = tmp_buf;
			ret = 0;
			break;

		case STOP_NVM_WRITE:
			ret = 0;
			break;
		case READ_BUF:
			tmp_buf = (uint8_t *)qede->nvm_buf_start;
			for (i = 0; i < size; i++) {
				cmn_err(CE_NOTE,
				    "buff (%d) : %d\n", i, *tmp_buf);
				tmp_buf++;
			}
			ret = 0;
			break;
		}
		break;
	case QEDE_NVRAM_CMD_PUT_FILE_DATA:
		tmp_buf = qede->nvm_buf_start;
		ret = ecore_mcp_nvm_write(edev, ECORE_PUT_FILE_DATA,
		    addr, tmp_buf, size);
		kmem_free(qede->nvm_buf_start, size);
		cmn_err(CE_NOTE, "total size = %x, copied size = %x\n",
		    qede->nvm_buf_size, qede->copy_len);
		tmp_buf = NULL;
		qede->nvm_buf = NULL;
		qede->nvm_buf_start = NULL;
		ret = 0;
		break;

	case QEDE_NVRAM_CMD_SET_SECURE_MODE:
		ret = ecore_mcp_nvm_set_secure_mode(edev, addr);
		break;

	case QEDE_NVRAM_CMD_DEL_FILE:
		ret = ecore_mcp_nvm_del_file(edev, addr);
		break;

	case QEDE_NVRAM_CMD_PUT_FILE_BEGIN:
		ret = ecore_mcp_nvm_put_file_begin(edev, addr);
		break;

	case QEDE_NVRAM_CMD_GET_NVRAM_RESP:
		buf = kmem_zalloc(size, KM_SLEEP);
		ret = ecore_mcp_nvm_resp(edev, buf);
		(void) memcpy(data1->uabc, buf, size);
		kmem_free(buf, size);
		break;

	default:
		cmn_err(CE_WARN,
		    "wrong command in NVRAM read/write from application\n");
		break;
	}
	return (DDI_SUCCESS);
}

static int
qede_get_func_info(qede_t *qede, void *data)
{
	qede_link_output_t link_op;
	qede_func_info_t func_info;
	qede_ioctl_data_t *data1 = (qede_ioctl_data_t *)data;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *hwfn;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;

	hwfn = &edev->hwfns[0];

	if (hwfn == NULL) {
		cmn_err(CE_WARN, "(%s) : cannot acquire hwfn\n",
		    __func__);
		return (DDI_FAILURE);
	}
	memcpy(&params, &hwfn->mcp_info->link_input, sizeof (params));
	memcpy(&link, &hwfn->mcp_info->link_output, sizeof (link));

	if (link.link_up) {
		link_op.link_up = true;
	}

	link_op.supported_caps = SUPPORTED_FIBRE;
	if (params.speed.autoneg) {
		link_op.supported_caps |= SUPPORTED_Autoneg;
	}

	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx)) {
		link_op.supported_caps |= SUPPORTED_Asym_Pause;
	}

	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx) {
		link_op.supported_caps |= SUPPORTED_Pause;
	}

	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
		link_op.supported_caps |= SUPPORTED_1000baseT_Half |
		    SUPPORTED_1000baseT_Full;
	}

	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
		link_op.supported_caps |= SUPPORTED_10000baseKR_Full;
	}

	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_LINK_SPEED_40G) {
		link_op.supported_caps |= SUPPORTED_40000baseLR4_Full;
	}

	link_op.advertised_caps = link_op.supported_caps;

	if (link.link_up) {
		link_op.speed = link.speed;
	} else {
		link_op.speed = 0;
	}

	link_op.duplex = DUPLEX_FULL;
	link_op.port = PORT_FIBRE;

	link_op.autoneg = params.speed.autoneg;

	/* Link partner capabilities */
	if (link.partner_adv_speed &
	    ECORE_LINK_PARTNER_SPEED_1G_HD) {
		link_op.lp_caps |= SUPPORTED_1000baseT_Half;
	}

	if (link.partner_adv_speed &
	    ECORE_LINK_PARTNER_SPEED_1G_FD) {
		link_op.lp_caps |= SUPPORTED_1000baseT_Full;
	}

	if (link.partner_adv_speed &
	    ECORE_LINK_PARTNER_SPEED_10G) {
		link_op.lp_caps |= SUPPORTED_10000baseKR_Full;
	}

	if (link.partner_adv_speed &
	    ECORE_LINK_PARTNER_SPEED_20G) {
		link_op.lp_caps |= SUPPORTED_20000baseKR2_Full;
	}

	if (link.partner_adv_speed &
	    ECORE_LINK_PARTNER_SPEED_40G) {
		link_op.lp_caps |= SUPPORTED_40000baseLR4_Full;
	}

	if (link.an_complete) {
		link_op.lp_caps |= SUPPORTED_Autoneg;
	}

	if (link.partner_adv_pause) {
		link_op.lp_caps |= SUPPORTED_Pause;
	}

	if (link.partner_adv_pause == ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == ECORE_LINK_PARTNER_BOTH_PAUSE) {
		link_op.lp_caps |= SUPPORTED_Asym_Pause;
	}

	func_info.supported = link_op.supported_caps;
	func_info.advertising = link_op.advertised_caps;
	func_info.speed = link_op.speed;
	func_info.duplex = link_op.duplex;
	func_info.port = qede->pci_func & 0x1;
	func_info.autoneg = link_op.autoneg;

	(void) memcpy(data1->uabc, &func_info, sizeof (qede_func_info_t));

	return (0);
}

static int
qede_do_ioctl(qede_t *qede, queue_t *q, mblk_t *mp)
{
	qede_ioctl_data_t *up_data;
	qede_driver_info_t driver_info;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *hwfn;
	struct ecore_ptt *ptt = NULL;
	struct mcp_file_att attrib;
	uint32_t flash_size;
	uint32_t mcp_resp, mcp_param, txn_size;
	uint32_t cmd, size, ret = 0;
	uint64_t off;
	int *up_data1;
	void *ptr;
	mblk_t *mp1 = mp;
	char mac_addr[32];

	up_data = (qede_ioctl_data_t *)(mp->b_cont->b_rptr);

	cmd = up_data->cmd;
	off = up_data->off;
	size = up_data->size;

	switch (cmd) {
	case QEDE_DRV_INFO:
		hwfn = &edev->hwfns[0];
		ptt = ecore_ptt_acquire(hwfn);

		snprintf(driver_info.drv_name, MAX_QEDE_NAME_LEN, "%s", "qede");
		snprintf(driver_info.drv_version, QEDE_STR_SIZE,
		    "v:%s", qede->version);
		snprintf(driver_info.mfw_version, QEDE_STR_SIZE,
		    "%s", qede->versionMFW);
		snprintf(driver_info.stormfw_version, QEDE_STR_SIZE,
		    "%s", qede->versionFW);
		snprintf(driver_info.bus_info, QEDE_STR_SIZE,
		    "%s", qede->bus_dev_func);

		/*
		 * Query the management firmware for the flash size;
		 * 0x08 is the equivalent of NVM_TYPE_MFW_TRACE1.
		 */
		ecore_mcp_get_flash_size(hwfn, ptt, &flash_size);
		driver_info.eeprom_dump_len = flash_size;
		(void) memcpy(up_data->uabc, &driver_info,
		    sizeof (qede_driver_info_t));
		up_data->size = sizeof (qede_driver_info_t);

		ecore_ptt_release(hwfn, ptt);
		break;

	case QEDE_RD_PCICFG:
		ret = qede_ioctl_pcicfg_rd(qede, off, up_data->uabc, size);
		break;

	case QEDE_WR_PCICFG:
		ret = qede_ioctl_pcicfg_wr(qede, off, up_data, size);
		break;

	case QEDE_RW_REG:
		ret = qede_ioctl_rd_wr_reg(qede, (void *)up_data);
		break;

	case QEDE_RW_NVRAM:
		ret = qede_ioctl_rd_wr_nvram(qede, mp1);
		break;

	case QEDE_FUNC_INFO:
		ret = qede_get_func_info(qede, (void *)up_data);
		break;

	case QEDE_MAC_ADDR:
		snprintf(mac_addr, sizeof (mac_addr),
		    "%02x:%02x:%02x:%02x:%02x:%02x",
		    qede->ether_addr[0], qede->ether_addr[1],
		    qede->ether_addr[2], qede->ether_addr[3],
		    qede->ether_addr[4], qede->ether_addr[5]);
		(void) memcpy(up_data->uabc, &mac_addr, sizeof (mac_addr));
		break;
	}

	miocack(q, mp, (sizeof (qede_ioctl_data_t)), ret);
	return (IOC_REPLY);
}

static void
qede_ioctl(qede_t *qede, int cmd, queue_t *q, mblk_t *mp)
{
	void *ptr;

	switch (cmd) {
	case QEDE_CMD:
		(void) qede_do_ioctl(qede, q, mp);
		break;
	default:
		cmn_err(CE_WARN, "qede ioctl command %x not supported\n", cmd);
		break;
	}
	return;
}

enum ioc_reply
qede_loopback_ioctl(qede_t *qede, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
	lb_info_sz_t *lb_info_size;
	lb_property_t *lb_prop;
	uint32_t *lb_mode;
	int cmd;

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL) {
		return (IOC_INVAL);
	}

	cmd = iocp->ioc_cmd;

	switch (cmd) {
	default:
		qede_print("!%s(%d): unknown ioctl command %x\n",
		    __func__, qede->instance, cmd);
		return (IOC_INVAL);
	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t)) {
			qede_info(qede, "error: ioc_count %d, sizeof %d",
			    iocp->ioc_count, sizeof (lb_info_sz_t));
			return (IOC_INVAL);
		}
		lb_info_size = (void *)mp->b_cont->b_rptr;
		*lb_info_size = sizeof (loopmodes);
		return (IOC_REPLY);
	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes)) {
			qede_info(qede, "error: ioc_count %d, sizeof %d",
			    iocp->ioc_count, sizeof (loopmodes));
			return (IOC_INVAL);
		}
		lb_prop = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lb_prop, sizeof (loopmodes));
		return (IOC_REPLY);
	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			qede_info(qede, "iocp->ioc_count %d, sizeof : %d\n",
			    iocp->ioc_count, sizeof (uint32_t));
			return (IOC_INVAL);
		}
		lb_mode = (void *)mp->b_cont->b_rptr;
		*lb_mode = qede->loop_back_mode;
		return (IOC_REPLY);
	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			qede_info(qede, "iocp->ioc_count %d, sizeof : %d\n",
			    iocp->ioc_count, sizeof (uint32_t));
			return (IOC_INVAL);
		}
		lb_mode = (void *)mp->b_cont->b_rptr;
		return (qede_set_loopback_mode(qede, *lb_mode));
	}
}

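/*
 * Top-level GLD ioctl entry point. Loopback "get" ioctls may be issued
 * without privilege; LB_SET_MODE and QEDE_CMD must pass
 * secpolicy_net_config() first.
 */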
static void
qede_mac_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	int err, cmd;
	qede_t *qede = (qede_t *)arg;
	struct iocblk *iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
	enum ioc_reply status = IOC_DONE;
	boolean_t need_privilege = B_TRUE;

	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;

	mutex_enter(&qede->drv_lock);
	if ((qede->qede_state == QEDE_STATE_SUSPENDING) ||
	    (qede->qede_state == QEDE_STATE_SUSPENDED)) {
		mutex_exit(&qede->drv_lock);
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	switch (cmd) {
	case QEDE_CMD:
		break;
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;
	default:
		qede_print("!%s(%d) unknown ioctl command %x\n",
		    __func__, qede->instance, cmd);
		miocnak(wq, mp, 0, EINVAL);
		mutex_exit(&qede->drv_lock);
		return;
	}

	if (need_privilege) {
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err) {
			qede_info(qede, "secpolicy() failed");
			miocnak(wq, mp, 0, err);
			mutex_exit(&qede->drv_lock);
			return;
		}
	}

	switch (cmd) {
	default:
		qede_print("!%s(%d) : unknown ioctl command %x\n",
		    __func__, qede->instance, cmd);
		status = IOC_INVAL;
		mutex_exit(&qede->drv_lock);
		return;
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = qede_loopback_ioctl(qede, wq, mp, iocp);
		break;
	case QEDE_CMD:
		qede_ioctl(qede, cmd, wq, mp);
		status = IOC_DONE;
		break;
	}

	switch (status) {
	default:
		qede_print("!%s(%d) : invalid status from ioctl",
		    __func__, qede->instance);
		break;
	case IOC_DONE:
		/*
		 * OK, Reply already sent
		 */
		break;
	case IOC_REPLY:
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	case IOC_INVAL:
		mutex_exit(&qede->drv_lock);
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		return;
	}
	mutex_exit(&qede->drv_lock);
}

extern ddi_dma_attr_t qede_buf2k_dma_attr_txbuf;
extern ddi_dma_attr_t qede_dma_attr_rxbuf;
extern ddi_dma_attr_t qede_dma_attr_desc;

static boolean_t
qede_mac_get_capability(void *arg, mac_capab_t capability, void *cap_data)
{
	qede_t *qede = (qede_t *)arg;
	uint32_t *txflags = cap_data;
	boolean_t ret = B_FALSE;

	switch (capability) {
	case MAC_CAPAB_HCKSUM: {
		u32 *tx_flags = cap_data;
		/*
		 * Check if checksum is enabled on
		 * tx and advertise the cksum capab
		 * to mac layer accordingly. On Rx
		 * side checksummed packets are
		 * received anyway
		 */
		qede_info(qede, "%s tx checksum offload",
		    (qede->checksum == DEFAULT_CKSUM_OFFLOAD) ?
		    "Enabling" :
		    "Disabling");

		if (qede->checksum != DEFAULT_CKSUM_OFFLOAD) {
			ret = B_FALSE;
			break;
		}
		/*
		 * Hardware does not support ICMPv6 checksumming. Right now the
		 * GLDv3 doesn't provide us a way to specify that we don't
		 * support that. As such, we cannot indicate
		 * HCKSUM_INET_FULL_V6.
		 */

		*tx_flags = HCKSUM_INET_FULL_V4 |
		    HCKSUM_IPHDRCKSUM;
		ret = B_TRUE;
		break;
	}
	case MAC_CAPAB_LSO: {
		mac_capab_lso_t *cap_lso = (mac_capab_lso_t *)cap_data;

		qede_info(qede, "%s large segmentation offload",
		    qede->lso_enable ? "Enabling" : "Disabling");
		if (qede->lso_enable) {
			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
			cap_lso->lso_basic_tcp_ipv4.lso_max = QEDE_LSO_MAXLEN;
			ret = B_TRUE;
		}
		break;
	}
	case MAC_CAPAB_RINGS: {
#ifndef NO_CROSSBOW
		mac_capab_rings_t *cap_rings = cap_data;
#ifndef ILLUMOS
		cap_rings->mr_version = MAC_RINGS_VERSION_1;
#endif

		switch (cap_rings->mr_type) {
		case MAC_RING_TYPE_RX:
#ifndef ILLUMOS
			cap_rings->mr_flags = MAC_RINGS_VLAN_TRANSPARENT;
#endif
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = qede->num_fp;
			cap_rings->mr_gnum = 1;
			cap_rings->mr_rget = qede_fill_ring;
			cap_rings->mr_gget = qede_fill_group;
			cap_rings->mr_gaddring = NULL;
			cap_rings->mr_gremring = NULL;
#ifndef ILLUMOS
			cap_rings->mr_ggetringtc = NULL;
#endif
			ret = B_TRUE;
			break;
		case MAC_RING_TYPE_TX:
#ifndef ILLUMOS
			cap_rings->mr_flags = MAC_RINGS_VLAN_TRANSPARENT;
#endif
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = qede->num_fp;
			cap_rings->mr_gnum = 0;
			cap_rings->mr_rget = qede_fill_ring;
			cap_rings->mr_gget = qede_fill_group;
			cap_rings->mr_gaddring = NULL;
			cap_rings->mr_gremring = NULL;
#ifndef ILLUMOS
			cap_rings->mr_ggetringtc = NULL;
#endif
			ret = B_TRUE;
			break;
		default:
			ret = B_FALSE;
			break;
		}
#endif
		break; /* CASE MAC_CAPAB_RINGS */
	}
#ifdef ILLUMOS
	case MAC_CAPAB_TRANSCEIVER: {
		mac_capab_transceiver_t *mct = cap_data;

		mct->mct_flags = 0;
		mct->mct_ntransceivers = qede->edev.num_hwfns;
		mct->mct_info = qede_transceiver_info;
		mct->mct_read = qede_transceiver_read;

		ret = B_TRUE;
		break;
	}
#endif
	default:
		break;
	}

	return (ret);
}

int
qede_configure_link(qede_t *qede, bool op);

static int
qede_mac_set_property(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	qede_t *qede = (qede_t *)arg;
	struct ecore_mcp_link_params *link_params;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *hwfn;
	int ret_val = 0, i;
	uint32_t option;

	mutex_enter(&qede->gld_lock);
	switch (pr_num)
	{
	case MAC_PROP_MTU:
		bcopy(pr_val, &option, sizeof (option));

		if (option == qede->mtu) {
			ret_val = 0;
			break;
		}
		if ((option != DEFAULT_JUMBO_MTU) &&
		    (option != DEFAULT_MTU)) {
			ret_val = EINVAL;
			break;
		}
		if (qede->qede_state == QEDE_STATE_STARTED) {
			ret_val = EBUSY;
			break;
		}

		ret_val = mac_maxsdu_update(qede->mac_handle, qede->mtu);
		if (ret_val == 0) {
			qede->mtu = option;
			if (option == DEFAULT_JUMBO_MTU) {
				qede->jumbo_enable = B_TRUE;
			} else {
				qede->jumbo_enable = B_FALSE;
			}

			hwfn = ECORE_LEADING_HWFN(edev);
			hwfn->hw_info.mtu = qede->mtu;
			ret_val = ecore_mcp_ov_update_mtu(hwfn,
			    hwfn->p_main_ptt,
			    hwfn->hw_info.mtu);
			if (ret_val != ECORE_SUCCESS) {
				qede_print("!%s(%d): MTU change %d option %d "
				    "FAILED",
				    __func__, qede->instance, qede->mtu,
				    option);
				break;
			}
			qede_print("!%s(%d): MTU changed %d MTU option"
			    " %d hwfn %d",
			    __func__, qede->instance, qede->mtu,
			    option, hwfn->hw_info.mtu);
		}
		break;

	case MAC_PROP_EN_10GFDX_CAP:
		hwfn = &edev->hwfns[0];
		link_params = ecore_mcp_get_link_params(hwfn);
		if (*(uint8_t *)pr_val) {
			link_params->speed.autoneg = 0;
			link_params->speed.forced_speed = 10000;
			link_params->speed.advertised_speeds =
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
			qede->forced_speed_10G = *(uint8_t *)pr_val;
		} else {
			memcpy(link_params,
			    &qede->link_input_params.default_link_params,
			    sizeof (struct ecore_mcp_link_params));
			qede->forced_speed_10G = *(uint8_t *)pr_val;
		}
		if (qede->qede_state == QEDE_STATE_STARTED) {
			qede_configure_link(qede, 1);
		} else {
			mutex_exit(&qede->gld_lock);
			return (0);
		}
		break;
	default:
		ret_val = ENOTSUP;
		break;
	}
	mutex_exit(&qede->gld_lock);
	return (ret_val);
}

static void
qede_mac_stop(void *arg)
{
	qede_t *qede = (qede_t *)arg;
	int status;

	qede_print("!%s(%d): called",
	    __func__, qede->instance);
	mutex_enter(&qede->drv_lock);
	status = qede_stop(qede);
	if (status != DDI_SUCCESS) {
		qede_print("!%s(%d): qede_stop "
		    "FAILED",
		    __func__, qede->instance);
	}

	mac_link_update(qede->mac_handle, LINK_STATE_UNKNOWN);
	mutex_exit(&qede->drv_lock);
}

static int
qede_mac_start(void *arg)
{
	qede_t *qede = (qede_t *)arg;
	int status;

	qede_print("!%s(%d): called", __func__, qede->instance);
	if (!mutex_tryenter(&qede->drv_lock)) {
		return (EAGAIN);
	}

	if (qede->qede_state == QEDE_STATE_SUSPENDED) {
		mutex_exit(&qede->drv_lock);
		return (ECANCELED);
	}

	status = qede_start(qede);
	if (status != DDI_SUCCESS) {
		mutex_exit(&qede->drv_lock);
		return (EIO);
	}

	mutex_exit(&qede->drv_lock);

#ifdef DBLK_DMA_PREMAP
	qede->pm_handle = mac_pmh_tx_get(qede->mac_handle);
#endif
	return (0);
}

static int
qede_mac_get_property(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val)
{
	qede_t *qede = (qede_t *)arg;
	struct ecore_dev *edev = &qede->edev;
	link_state_t link_state;
	link_duplex_t link_duplex;
	uint64_t link_speed;
	link_flowctrl_t link_flowctrl;
	struct qede_link_cfg link_cfg;
	qede_link_cfg_t *hw_cfg = &qede->hwinit;
	int ret_val = 0;

	memset(&link_cfg, 0, sizeof (struct qede_link_cfg));
	qede_get_link_info(&edev->hwfns[0], &link_cfg);

	switch (pr_num)
	{
	case MAC_PROP_MTU:
		ASSERT(pr_valsize >= sizeof (uint32_t));
		bcopy(&qede->mtu, pr_val, sizeof (uint32_t));
		break;

	case MAC_PROP_DUPLEX:
		ASSERT(pr_valsize >= sizeof (link_duplex_t));
		link_duplex = (qede->props.link_duplex) ?
		    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
		bcopy(&link_duplex, pr_val, sizeof (link_duplex_t));
		break;

	case MAC_PROP_SPEED:
		ASSERT(pr_valsize >= sizeof (link_speed));

		link_speed = (qede->props.link_speed * 1000000ULL);
		bcopy(&link_speed, pr_val, sizeof (link_speed));
		break;

	case MAC_PROP_STATUS:
		ASSERT(pr_valsize >= sizeof (link_state_t));

		link_state = (qede->params.link_state) ?
		    LINK_STATE_UP : LINK_STATE_DOWN;
		bcopy(&link_state, pr_val, sizeof (link_state_t));
		qede_info(qede, "mac_prop_status %d\n", link_state);
		break;

	case MAC_PROP_AUTONEG:
		*(uint8_t *)pr_val = link_cfg.autoneg;
		break;

	case MAC_PROP_FLOWCTRL:
		ASSERT(pr_valsize >= sizeof (link_flowctrl_t));

		/*
		 * illumos does not have the notion of LINK_FLOWCTRL_AUTO
		 * at this time.
		 */
#ifndef ILLUMOS
		if (link_cfg.pause_cfg & QEDE_LINK_PAUSE_AUTONEG_ENABLE) {
			link_flowctrl = LINK_FLOWCTRL_AUTO;
		}
#endif

		if (!(link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
		    !(link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
			link_flowctrl = LINK_FLOWCTRL_NONE;
		}
		if ((link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
		    !(link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
			link_flowctrl = LINK_FLOWCTRL_RX;
		}
		if (!(link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
		    (link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
			link_flowctrl = LINK_FLOWCTRL_TX;
		}
		if ((link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
		    (link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
			link_flowctrl = LINK_FLOWCTRL_BI;
		}

		bcopy(&link_flowctrl, pr_val, sizeof (link_flowctrl_t));
		break;

	case MAC_PROP_ADV_10GFDX_CAP:
		*(uint8_t *)pr_val = link_cfg.adv_capab.param_10000fdx;
		break;

	case MAC_PROP_EN_10GFDX_CAP:
		*(uint8_t *)pr_val = qede->forced_speed_10G;
		break;

	case MAC_PROP_PRIVATE:
	default:
		return (ENOTSUP);
	}

	return (0);
}

static void
qede_mac_property_info(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    mac_prop_info_handle_t prh)
{
	qede_t *qede = (qede_t *)arg;
	qede_link_props_t *def_cfg = &qede_def_link_props;
	link_flowctrl_t link_flowctrl;

	switch (pr_num)
	{
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		break;

	case MAC_PROP_MTU:
		mac_prop_info_set_range_uint32(prh,
		    MIN_MTU,
		    MAX_MTU);
		break;

	case MAC_PROP_AUTONEG:
		mac_prop_info_set_default_uint8(prh, def_cfg->autoneg);
		break;

	case MAC_PROP_FLOWCTRL:
		if (!def_cfg->pause) {
			link_flowctrl = LINK_FLOWCTRL_NONE;
		} else {
			link_flowctrl = LINK_FLOWCTRL_BI;
		}

		mac_prop_info_set_default_link_flowctrl(prh, link_flowctrl);
		break;

	case MAC_PROP_EN_10GFDX_CAP:
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_RW);
		break;

	case MAC_PROP_ADV_10GFDX_CAP:
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		break;

	default:
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		break;
	}
}

static mac_callbacks_t qede_callbacks =
{
	(
	    MC_IOCTL
/*	    | MC_RESOURCES */
	    | MC_SETPROP
	    | MC_GETPROP
	    | MC_PROPINFO
	    | MC_GETCAPAB
	),
	qede_mac_stats,
	qede_mac_start,
	qede_mac_stop,
	qede_mac_promiscuous,
	qede_mac_multicast,
	NULL,
#ifndef NO_CROSSBOW
	NULL,
#else
	qede_mac_tx,
#endif
	NULL,	/* qede_mac_resources, */
	qede_mac_ioctl,
	qede_mac_get_capability,
	NULL,
	NULL,
	qede_mac_set_property,
	qede_mac_get_property,
#ifdef MC_PROPINFO
	qede_mac_property_info
#endif
};

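/*
 * Register this instance with the GLDv3 framework. On success the mac
 * handle is stored in qede->mac_handle; the mac_register_t template is
 * freed either way.
 */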
boolean_t
qede_gld_init(qede_t *qede)
{
	int status, ret;
	mac_register_t *macp;

	macp = mac_alloc(MAC_VERSION);
	if (macp == NULL) {
		cmn_err(CE_NOTE, "%s: mac_alloc() failed\n", __func__);
		return (B_FALSE);
	}

	macp->m_driver = qede;
	macp->m_dip = qede->dip;
	macp->m_instance = qede->instance;
	macp->m_priv_props = NULL;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_src_addr = qede->ether_addr;
	macp->m_callbacks = &qede_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = qede->mtu;
	macp->m_margin = VLAN_TAGSZ;
#ifdef ILLUMOS
	macp->m_v12n = MAC_VIRT_LEVEL1;
#endif

	status = mac_register(macp, &qede->mac_handle);
	if (status != 0) {
		cmn_err(CE_NOTE, "%s: mac_register() failed\n", __func__);
	}

	mac_free(macp);
	if (status == 0) {
		return (B_TRUE);
	}
	return (B_FALSE);
}

boolean_t
qede_gld_fini(qede_t *qede)
{
	return (B_TRUE);
}

void
qede_link_update(qede_t *qede, link_state_t state)
{
	mac_link_update(qede->mac_handle, state);
}