2 * Copyright (C) 2013 Emulex
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the Emulex Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
31 * Contact Information:
32 * freebsd-drivers@emulex.com
36 * Costa Mesa, CA 92626
40 /* $FreeBSD: src/sys/dev/oce/oce_if.c,v 1.14 2013/07/07 00:30:13 svnexp Exp $ */
42 #include "opt_inet6.h"
48 /* Driver entry points prototypes */
49 static int oce_probe(device_t dev
);
50 static int oce_attach(device_t dev
);
51 static int oce_detach(device_t dev
);
52 static int oce_shutdown(device_t dev
);
53 static int oce_ioctl(struct ifnet
*ifp
, u_long command
, caddr_t data
, struct ucred
*cr
);
54 static void oce_init(void *xsc
);
55 #if 0 /* XXX swildner: MULTIQUEUE */
56 static int oce_multiq_start(struct ifnet
*ifp
, struct mbuf
*m
);
57 static void oce_multiq_flush(struct ifnet
*ifp
);
60 /* Driver interrupt routines prototypes */
61 static void oce_intr(void *arg
, int pending
);
62 static int oce_setup_intr(POCE_SOFTC sc
);
63 static void oce_fast_isr(void *arg
);
64 static int oce_alloc_intr(POCE_SOFTC sc
, int vector
,
65 void (*isr
) (void *arg
, int pending
));
67 /* Media callbacks prototypes */
68 static void oce_media_status(struct ifnet
*ifp
, struct ifmediareq
*req
);
69 static int oce_media_change(struct ifnet
*ifp
);
71 /* Transmit routines prototypes */
72 static int oce_tx(POCE_SOFTC sc
, struct mbuf
**mpp
, int wq_index
);
73 static void oce_tx_restart(POCE_SOFTC sc
, struct oce_wq
*wq
);
74 static void oce_tx_complete(struct oce_wq
*wq
, uint32_t wqe_idx
,
76 #if 0 /* XXX swildner: MULTIQUEUE */
77 static int oce_multiq_transmit(struct ifnet
*ifp
, struct mbuf
*m
,
81 /* Receive routines prototypes */
82 static void oce_discard_rx_comp(struct oce_rq
*rq
, struct oce_nic_rx_cqe
*cqe
);
83 #if 0 /* XXX swildner: ETHER_VTAG */
84 static int oce_cqe_vtp_valid(POCE_SOFTC sc
, struct oce_nic_rx_cqe
*cqe
);
86 static int oce_cqe_portid_valid(POCE_SOFTC sc
, struct oce_nic_rx_cqe
*cqe
);
87 static void oce_rx(struct oce_rq
*rq
, uint32_t rqe_idx
,
88 struct oce_nic_rx_cqe
*cqe
);
90 /* Helper function prototypes in this file */
91 static int oce_attach_ifp(POCE_SOFTC sc
);
92 static void oce_add_vlan(void *arg
, struct ifnet
*ifp
, uint16_t vtag
);
93 static void oce_del_vlan(void *arg
, struct ifnet
*ifp
, uint16_t vtag
);
94 static int oce_vid_config(POCE_SOFTC sc
);
95 static void oce_mac_addr_set(POCE_SOFTC sc
);
96 static int oce_handle_passthrough(struct ifnet
*ifp
, caddr_t data
);
97 static void oce_local_timer(void *arg
);
98 static void oce_if_deactivate(POCE_SOFTC sc
);
99 static void oce_if_activate(POCE_SOFTC sc
);
100 static void setup_max_queues_want(POCE_SOFTC sc
);
101 static void update_queues_got(POCE_SOFTC sc
);
102 static void process_link_state(POCE_SOFTC sc
,
103 struct oce_async_cqe_link_state
*acqe
);
104 static int oce_tx_asic_stall_verify(POCE_SOFTC sc
, struct mbuf
*m
);
105 static void oce_get_config(POCE_SOFTC sc
);
106 static struct mbuf
*oce_insert_vlan_tag(POCE_SOFTC sc
, struct mbuf
*m
, boolean_t
*complete
);
109 #if defined(INET6) || defined(INET)
110 #if 0 /* XXX swildner: LRO */
111 static int oce_init_lro(POCE_SOFTC sc
);
112 static void oce_rx_flush_lro(struct oce_rq
*rq
);
114 static struct mbuf
* oce_tso_setup(POCE_SOFTC sc
, struct mbuf
**mpp
);
117 static device_method_t oce_dispatch
[] = {
118 DEVMETHOD(device_probe
, oce_probe
),
119 DEVMETHOD(device_attach
, oce_attach
),
120 DEVMETHOD(device_detach
, oce_detach
),
121 DEVMETHOD(device_shutdown
, oce_shutdown
),
126 static driver_t oce_driver
= {
131 static devclass_t oce_devclass
;
/* Register the oce driver on the PCI bus (no attach/detach event handlers). */
134 DRIVER_MODULE(oce
, pci
, oce_driver
, oce_devclass
, NULL
, NULL
);
/* The driver depends on the PCI and Ethernet subsystems (min/preferred/max version 1). */
135 MODULE_DEPEND(oce
, pci
, 1, 1, 1);
136 MODULE_DEPEND(oce
, ether
, 1, 1, 1);
/* Module version 1. */
137 MODULE_VERSION(oce
, 1);
/*
 * Component revision string embedded in the binary, wrapped in "///"
 * marker sequences — presumably so the revision can be located in the
 * object file by tooling (NOTE(review): confirm against build scripts).
 */
141 const char component_revision
[32] = {"///" COMPONENT_REVISION
"///"};
143 /* Module capabilities and parameters */
/* Upper bound on completion-queue entries processed per handler pass. */
144 uint32_t oce_max_rsp_handled
= OCE_MAX_RSP_HANDLED
;
145 #if 0 /* XXX swildner: RSS */
146 uint32_t oce_enable_rss
= OCE_MODCAP_RSS
;
/*
 * RSS disabled in this port (the #else/#endif closing the region above
 * are outside the visible fragment — NOTE(review): confirm).
 */
148 uint32_t oce_enable_rss
= 0;
/* Expose both knobs as loader tunables. */
152 TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled
);
153 TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss
);
156 /* Supported devices table */
/*
 * Each entry packs (PCI vendor ID << 16) | PCI device ID; oce_probe()
 * matches against these halves.  NOTE(review): the table's closing
 * brace lies outside this fragment.
 */
157 static uint32_t supportedDevices
[] = {
158 (PCI_VENDOR_SERVERENGINES
<< 16) | PCI_PRODUCT_BE2
,
159 (PCI_VENDOR_SERVERENGINES
<< 16) | PCI_PRODUCT_BE3
,
160 (PCI_VENDOR_EMULEX
<< 16) | PCI_PRODUCT_BE3
,
161 (PCI_VENDOR_EMULEX
<< 16) | PCI_PRODUCT_XE201
,
162 (PCI_VENDOR_EMULEX
<< 16) | PCI_PRODUCT_XE201_VF
,
163 (PCI_VENDOR_EMULEX
<< 16) | PCI_PRODUCT_SH
169 /*****************************************************************************
170 * Driver entry points functions *
171 *****************************************************************************/
174 oce_probe(device_t dev
)
182 sc
= device_get_softc(dev
);
183 bzero(sc
, sizeof(OCE_SOFTC
));
186 vendor
= pci_get_vendor(dev
);
187 device
= pci_get_device(dev
);
189 for (i
= 0; i
< (sizeof(supportedDevices
) / sizeof(uint32_t)); i
++) {
190 if (vendor
== ((supportedDevices
[i
] >> 16) & 0xffff)) {
191 if (device
== (supportedDevices
[i
] & 0xffff)) {
192 ksprintf(str
, "%s:%s", "Emulex CNA NIC function",
194 device_set_desc_copy(dev
, str
);
197 case PCI_PRODUCT_BE2
:
198 sc
->flags
|= OCE_FLAGS_BE2
;
200 case PCI_PRODUCT_BE3
:
201 sc
->flags
|= OCE_FLAGS_BE3
;
203 case PCI_PRODUCT_XE201
:
204 case PCI_PRODUCT_XE201_VF
:
205 sc
->flags
|= OCE_FLAGS_XE201
;
208 sc
->flags
|= OCE_FLAGS_SH
;
213 return BUS_PROBE_DEFAULT
;
223 oce_attach(device_t dev
)
228 sc
= device_get_softc(dev
);
230 rc
= oce_hw_pci_alloc(sc
);
234 sc
->tx_ring_size
= OCE_TX_RING_SIZE
;
235 sc
->rx_ring_size
= OCE_RX_RING_SIZE
;
236 sc
->rq_frag_size
= OCE_RQ_BUF_SIZE
;
237 sc
->flow_control
= OCE_DEFAULT_FLOW_CONTROL
;
238 sc
->promisc
= OCE_DEFAULT_PROMISCUOUS
;
240 LOCK_CREATE(&sc
->bmbx_lock
, "Mailbox_lock");
241 LOCK_CREATE(&sc
->dev_lock
, "Device_lock");
243 /* initialise the hardware */
244 rc
= oce_hw_init(sc
);
250 setup_max_queues_want(sc
);
252 rc
= oce_setup_intr(sc
);
256 rc
= oce_queue_init_all(sc
);
260 rc
= oce_attach_ifp(sc
);
264 #if defined(INET6) || defined(INET)
265 #if 0 /* XXX swildner: LRO */
266 rc
= oce_init_lro(sc
);
272 rc
= oce_hw_start(sc
);
276 sc
->vlan_attach
= EVENTHANDLER_REGISTER(vlan_config
,
277 oce_add_vlan
, sc
, EVENTHANDLER_PRI_FIRST
);
278 sc
->vlan_detach
= EVENTHANDLER_REGISTER(vlan_unconfig
,
279 oce_del_vlan
, sc
, EVENTHANDLER_PRI_FIRST
);
281 rc
= oce_stats_init(sc
);
287 callout_init_mp(&sc
->timer
);
288 callout_reset(&sc
->timer
, 2 * hz
, oce_local_timer
, sc
);
294 EVENTHANDLER_DEREGISTER(vlan_config
, sc
->vlan_attach
);
296 EVENTHANDLER_DEREGISTER(vlan_unconfig
, sc
->vlan_detach
);
297 oce_hw_intr_disable(sc
);
299 #if defined(INET6) || defined(INET)
300 #if 0 /* XXX swildner: LRO */
305 ether_ifdetach(sc
->ifp
);
308 oce_queue_release_all(sc
);
312 oce_dma_free(sc
, &sc
->bsmbx
);
315 LOCK_DESTROY(&sc
->dev_lock
);
316 LOCK_DESTROY(&sc
->bmbx_lock
);
323 oce_detach(device_t dev
)
325 POCE_SOFTC sc
= device_get_softc(dev
);
328 oce_if_deactivate(sc
);
329 UNLOCK(&sc
->dev_lock
);
331 callout_terminate(&sc
->timer
);
333 if (sc
->vlan_attach
!= NULL
)
334 EVENTHANDLER_DEREGISTER(vlan_config
, sc
->vlan_attach
);
335 if (sc
->vlan_detach
!= NULL
)
336 EVENTHANDLER_DEREGISTER(vlan_unconfig
, sc
->vlan_detach
);
338 ether_ifdetach(sc
->ifp
);
344 bus_generic_detach(dev
);
350 oce_shutdown(device_t dev
)
354 rc
= oce_detach(dev
);
361 oce_ioctl(struct ifnet
*ifp
, u_long command
, caddr_t data
, struct ucred
*cr
)
363 struct ifreq
*ifr
= (struct ifreq
*)data
;
364 POCE_SOFTC sc
= ifp
->if_softc
;
371 rc
= ifmedia_ioctl(ifp
, ifr
, &sc
->media
, command
);
375 if (ifr
->ifr_mtu
> OCE_MAX_MTU
)
378 ifp
->if_mtu
= ifr
->ifr_mtu
;
382 if (ifp
->if_flags
& IFF_UP
) {
383 if (!(ifp
->if_flags
& IFF_RUNNING
)) {
384 sc
->ifp
->if_flags
|= IFF_RUNNING
;
387 device_printf(sc
->dev
, "Interface Up\n");
391 sc
->ifp
->if_flags
&= ~IFF_RUNNING
;
392 ifq_clr_oactive(&ifp
->if_snd
);
393 oce_if_deactivate(sc
);
395 UNLOCK(&sc
->dev_lock
);
397 device_printf(sc
->dev
, "Interface Down\n");
400 if ((ifp
->if_flags
& IFF_PROMISC
) && !sc
->promisc
) {
402 oce_rxf_set_promiscuous(sc
, sc
->promisc
);
403 } else if (!(ifp
->if_flags
& IFF_PROMISC
) && sc
->promisc
) {
405 oce_rxf_set_promiscuous(sc
, sc
->promisc
);
412 rc
= oce_hw_update_multicast(sc
);
414 device_printf(sc
->dev
,
415 "Update multicast address failed\n");
419 u
= ifr
->ifr_reqcap
^ ifp
->if_capenable
;
421 if (u
& IFCAP_TXCSUM
) {
422 ifp
->if_capenable
^= IFCAP_TXCSUM
;
423 ifp
->if_hwassist
^= (CSUM_TCP
| CSUM_UDP
| CSUM_IP
);
425 if (IFCAP_TSO
& ifp
->if_capenable
&&
426 !(IFCAP_TXCSUM
& ifp
->if_capenable
)) {
427 ifp
->if_capenable
&= ~IFCAP_TSO
;
428 ifp
->if_hwassist
&= ~CSUM_TSO
;
430 "TSO disabled due to -txcsum.\n");
434 if (u
& IFCAP_RXCSUM
)
435 ifp
->if_capenable
^= IFCAP_RXCSUM
;
437 if (u
& IFCAP_TSO4
) {
438 ifp
->if_capenable
^= IFCAP_TSO4
;
440 if (IFCAP_TSO
& ifp
->if_capenable
) {
441 if (IFCAP_TXCSUM
& ifp
->if_capenable
)
442 ifp
->if_hwassist
|= CSUM_TSO
;
444 ifp
->if_capenable
&= ~IFCAP_TSO
;
445 ifp
->if_hwassist
&= ~CSUM_TSO
;
447 "Enable txcsum first.\n");
451 ifp
->if_hwassist
&= ~CSUM_TSO
;
454 if (u
& IFCAP_VLAN_HWTAGGING
)
455 ifp
->if_capenable
^= IFCAP_VLAN_HWTAGGING
;
457 #if 0 /* XXX swildner: VLAN_HWFILTER */
458 if (u
& IFCAP_VLAN_HWFILTER
) {
459 ifp
->if_capenable
^= IFCAP_VLAN_HWFILTER
;
463 #if defined(INET6) || defined(INET)
464 #if 0 /* XXX swildner: LRO */
466 ifp
->if_capenable
^= IFCAP_LRO
;
473 rc
= priv_check_cred(cr
, PRIV_ROOT
, NULL_CRED_OKAY
);
475 rc
= oce_handle_passthrough(ifp
, data
);
478 rc
= ether_ioctl(ifp
, command
, data
);
493 if (sc
->ifp
->if_flags
& IFF_UP
) {
494 oce_if_deactivate(sc
);
498 UNLOCK(&sc
->dev_lock
);
503 #if 0 /* XXX swildner: MULTIQUEUE */
505 oce_multiq_start(struct ifnet
*ifp
, struct mbuf
*m
)
507 POCE_SOFTC sc
= ifp
->if_softc
;
508 struct oce_wq
*wq
= NULL
;
512 if (!sc
->link_status
) {
513 ifq_purge(&ifp
->if_snd
);
517 if ((m
->m_flags
& M_FLOWID
) != 0)
518 queue_index
= m
->m_pkthdr
.flowid
% sc
->nwqs
;
520 wq
= sc
->wq
[queue_index
];
523 status
= oce_multiq_transmit(ifp
, m
, wq
);
524 UNLOCK(&wq
->tx_lock
);
532 oce_multiq_flush(struct ifnet
*ifp
)
534 POCE_SOFTC sc
= ifp
->if_softc
;
538 for (i
= 0; i
< sc
->nwqs
; i
++) {
539 while ((m
= buf_ring_dequeue_sc(sc
->wq
[i
]->br
)) != NULL
)
548 /*****************************************************************************
549 * Driver interrupt routines functions *
550 *****************************************************************************/
553 oce_intr(void *arg
, int pending
)
556 POCE_INTR_INFO ii
= (POCE_INTR_INFO
) arg
;
557 POCE_SOFTC sc
= ii
->sc
;
558 struct oce_eq
*eq
= ii
->eq
;
560 struct oce_cq
*cq
= NULL
;
564 bus_dmamap_sync(eq
->ring
->dma
.tag
, eq
->ring
->dma
.map
,
565 BUS_DMASYNC_POSTWRITE
);
567 eqe
= RING_GET_CONSUMER_ITEM_VA(eq
->ring
, struct oce_eqe
);
571 bus_dmamap_sync(eq
->ring
->dma
.tag
, eq
->ring
->dma
.map
,
572 BUS_DMASYNC_POSTWRITE
);
573 RING_GET(eq
->ring
, 1);
579 goto eq_arm
; /* Spurious */
581 /* Clear EQ entries, but don't arm */
582 oce_arm_eq(sc
, eq
->eq_id
, num_eqes
, FALSE
, FALSE
);
584 /* Process TX, RX and MCC. But don't arm CQ */
585 for (i
= 0; i
< eq
->cq_valid
; i
++) {
587 (*cq
->cq_handler
)(cq
->cb_arg
);
590 /* Arm all cqs connected to this EQ */
591 for (i
= 0; i
< eq
->cq_valid
; i
++) {
593 oce_arm_cq(sc
, cq
->cq_id
, 0, TRUE
);
597 oce_arm_eq(sc
, eq
->eq_id
, 0, TRUE
, FALSE
);
604 oce_setup_intr(POCE_SOFTC sc
)
606 int rc
= 0, use_intx
= 0;
608 #if 0 /* XXX swildner: MSI-X */
611 if (is_rss_enabled(sc
))
612 req_vectors
= MAX((sc
->nrqs
- 1), sc
->nwqs
);
616 if (sc
->flags
& OCE_FLAGS_MSIX_CAPABLE
) {
617 sc
->intr_count
= req_vectors
;
618 rc
= pci_alloc_msix(sc
->dev
, &sc
->intr_count
);
621 pci_release_msi(sc
->dev
);
623 sc
->flags
|= OCE_FLAGS_USING_MSIX
;
631 /* Scale number of queues based on intr we got */
632 update_queues_got(sc
);
635 device_printf(sc
->dev
, "Using legacy interrupt\n");
636 rc
= oce_alloc_intr(sc
, vector
, oce_intr
);
639 #if 0 /* XXX swildner: MSI-X */
641 for (; vector
< sc
->intr_count
; vector
++) {
642 rc
= oce_alloc_intr(sc
, vector
, oce_intr
);
657 oce_fast_isr(void *arg
)
659 POCE_INTR_INFO ii
= (POCE_INTR_INFO
) arg
;
660 POCE_SOFTC sc
= ii
->sc
;
665 oce_arm_eq(sc
, ii
->eq
->eq_id
, 0, FALSE
, TRUE
);
667 taskqueue_enqueue(ii
->tq
, &ii
->task
);
674 oce_alloc_intr(POCE_SOFTC sc
, int vector
, void (*isr
) (void *arg
, int pending
))
676 POCE_INTR_INFO ii
= &sc
->intrs
[vector
];
680 if (vector
>= OCE_MAX_EQ
)
683 #if 0 /* XXX swildner: MSI-X */
684 /* Set the resource id for the interrupt.
685 * MSIx is vector + 1 for the resource id,
686 * INTx is 0 for the resource id.
688 if (sc
->flags
& OCE_FLAGS_USING_MSIX
)
693 ii
->irq_type
= pci_alloc_1intr(sc
->dev
,
694 sc
->flags
& OCE_FLAGS_USING_MSI
, &rr
, &irq_flags
);
695 ii
->intr_res
= bus_alloc_resource_any(sc
->dev
,
699 if (ii
->intr_res
== NULL
) {
700 device_printf(sc
->dev
,
701 "Could not allocate interrupt\n");
706 TASK_INIT(&ii
->task
, 0, isr
, ii
);
708 ksprintf(ii
->task_name
, "oce_task[%d]", ii
->vector
);
709 ii
->tq
= taskqueue_create(ii
->task_name
,
711 taskqueue_thread_enqueue
,
713 taskqueue_start_threads(&ii
->tq
, 1, TDPRI_KERN_DAEMON
, -1, "%s taskq",
714 device_get_nameunit(sc
->dev
));
717 rc
= bus_setup_intr(sc
->dev
,
720 oce_fast_isr
, ii
, &ii
->tag
, NULL
);
727 oce_intr_free(POCE_SOFTC sc
)
731 for (i
= 0; i
< sc
->intr_count
; i
++) {
733 if (sc
->intrs
[i
].tag
!= NULL
)
734 bus_teardown_intr(sc
->dev
, sc
->intrs
[i
].intr_res
,
736 if (sc
->intrs
[i
].tq
!= NULL
)
737 taskqueue_free(sc
->intrs
[i
].tq
);
739 if (sc
->intrs
[i
].intr_res
!= NULL
)
740 bus_release_resource(sc
->dev
, SYS_RES_IRQ
,
742 sc
->intrs
[i
].intr_res
);
743 sc
->intrs
[i
].tag
= NULL
;
744 sc
->intrs
[i
].intr_res
= NULL
;
747 if (sc
->flags
& OCE_FLAGS_USING_MSIX
||
748 sc
->flags
& OCE_FLAGS_USING_MSI
)
749 pci_release_msi(sc
->dev
);
755 /******************************************************************************
756 * Media callbacks functions *
757 ******************************************************************************/
760 oce_media_status(struct ifnet
*ifp
, struct ifmediareq
*req
)
762 POCE_SOFTC sc
= (POCE_SOFTC
) ifp
->if_softc
;
765 req
->ifm_status
= IFM_AVALID
;
766 req
->ifm_active
= IFM_ETHER
;
768 if (sc
->link_status
== 1)
769 req
->ifm_status
|= IFM_ACTIVE
;
773 switch (sc
->link_speed
) {
774 case 1: /* 10 Mbps */
775 req
->ifm_active
|= IFM_10_T
| IFM_FDX
;
778 case 2: /* 100 Mbps */
779 req
->ifm_active
|= IFM_100_TX
| IFM_FDX
;
783 req
->ifm_active
|= IFM_1000_T
| IFM_FDX
;
786 case 4: /* 10 Gbps */
787 req
->ifm_active
|= IFM_10G_SR
| IFM_FDX
;
797 oce_media_change(struct ifnet
*ifp
)
805 /*****************************************************************************
806 * Transmit routines functions *
807 *****************************************************************************/
810 oce_tx(POCE_SOFTC sc
, struct mbuf
**mpp
, int wq_index
)
812 int rc
= 0, i
, retry_cnt
= 0;
813 bus_dma_segment_t segs
[OCE_MAX_TX_ELEMENTS
];
814 struct mbuf
*m
, *m_temp
;
815 struct oce_wq
*wq
= sc
->wq
[wq_index
];
816 struct oce_packet_desc
*pd
;
817 struct oce_nic_hdr_wqe
*nichdr
;
818 struct oce_nic_frag_wqe
*nicfrag
;
821 boolean_t complete
= TRUE
;
827 if (!(m
->m_flags
& M_PKTHDR
)) {
832 if(oce_tx_asic_stall_verify(sc
, m
)) {
833 m
= oce_insert_vlan_tag(sc
, m
, &complete
);
835 device_printf(sc
->dev
, "Insertion unsuccessful\n");
841 if (m
->m_pkthdr
.csum_flags
& CSUM_TSO
) {
842 /* consolidate packet buffers for TSO/LSO segment offload */
843 #if defined(INET6) || defined(INET)
844 m
= oce_tso_setup(sc
, mpp
);
854 pd
= &wq
->pckts
[wq
->pkt_desc_head
];
856 rc
= bus_dmamap_load_mbuf_defrag(wq
->tag
,
858 mpp
, segs
, OCE_MAX_TX_ELEMENTS
,
859 &pd
->nsegs
, BUS_DMA_NOWAIT
);
861 num_wqes
= pd
->nsegs
+ 1;
862 if (IS_BE(sc
) || IS_SH(sc
)) {
863 /*Dummy required only for BE3.*/
867 if (num_wqes
>= RING_NUM_FREE(wq
->ring
)) {
868 bus_dmamap_unload(wq
->tag
, pd
->map
);
871 atomic_store_rel_int(&wq
->pkt_desc_head
,
872 (wq
->pkt_desc_head
+ 1) % \
873 OCE_WQ_PACKET_ARRAY_SIZE
);
874 bus_dmamap_sync(wq
->tag
, pd
->map
, BUS_DMASYNC_PREWRITE
);
878 RING_GET_PRODUCER_ITEM_VA(wq
->ring
, struct oce_nic_hdr_wqe
);
879 nichdr
->u0
.dw
[0] = 0;
880 nichdr
->u0
.dw
[1] = 0;
881 nichdr
->u0
.dw
[2] = 0;
882 nichdr
->u0
.dw
[3] = 0;
884 nichdr
->u0
.s
.complete
= complete
;
885 nichdr
->u0
.s
.event
= 1;
886 nichdr
->u0
.s
.crc
= 1;
887 nichdr
->u0
.s
.forward
= 0;
888 nichdr
->u0
.s
.ipcs
= (m
->m_pkthdr
.csum_flags
& CSUM_IP
) ? 1 : 0;
890 (m
->m_pkthdr
.csum_flags
& CSUM_UDP
) ? 1 : 0;
892 (m
->m_pkthdr
.csum_flags
& CSUM_TCP
) ? 1 : 0;
893 nichdr
->u0
.s
.num_wqe
= num_wqes
;
894 nichdr
->u0
.s
.total_length
= m
->m_pkthdr
.len
;
895 #if 0 /* XXX swildner: ETHER_VTAG */
896 if (m
->m_flags
& M_VLANTAG
) {
897 nichdr
->u0
.s
.vlan
= 1; /*Vlan present*/
898 nichdr
->u0
.s
.vlan_tag
= m
->m_pkthdr
.ether_vtag
;
901 if (m
->m_pkthdr
.csum_flags
& CSUM_TSO
) {
902 if (m
->m_pkthdr
.tso_segsz
) {
903 nichdr
->u0
.s
.lso
= 1;
904 nichdr
->u0
.s
.lso_mss
= m
->m_pkthdr
.tso_segsz
;
906 if (!IS_BE(sc
) || !IS_SH(sc
))
907 nichdr
->u0
.s
.ipcs
= 1;
910 RING_PUT(wq
->ring
, 1);
911 atomic_add_int(&wq
->ring
->num_used
, 1);
913 for (i
= 0; i
< pd
->nsegs
; i
++) {
915 RING_GET_PRODUCER_ITEM_VA(wq
->ring
,
916 struct oce_nic_frag_wqe
);
917 nicfrag
->u0
.s
.rsvd0
= 0;
918 nicfrag
->u0
.s
.frag_pa_hi
= ADDR_HI(segs
[i
].ds_addr
);
919 nicfrag
->u0
.s
.frag_pa_lo
= ADDR_LO(segs
[i
].ds_addr
);
920 nicfrag
->u0
.s
.frag_len
= segs
[i
].ds_len
;
921 pd
->wqe_idx
= wq
->ring
->pidx
;
922 RING_PUT(wq
->ring
, 1);
923 atomic_add_int(&wq
->ring
->num_used
, 1);
925 if (num_wqes
> (pd
->nsegs
+ 1)) {
927 RING_GET_PRODUCER_ITEM_VA(wq
->ring
,
928 struct oce_nic_frag_wqe
);
929 nicfrag
->u0
.dw
[0] = 0;
930 nicfrag
->u0
.dw
[1] = 0;
931 nicfrag
->u0
.dw
[2] = 0;
932 nicfrag
->u0
.dw
[3] = 0;
933 pd
->wqe_idx
= wq
->ring
->pidx
;
934 RING_PUT(wq
->ring
, 1);
935 atomic_add_int(&wq
->ring
->num_used
, 1);
939 sc
->ifp
->if_opackets
++;
940 wq
->tx_stats
.tx_reqs
++;
941 wq
->tx_stats
.tx_wrbs
+= num_wqes
;
942 wq
->tx_stats
.tx_bytes
+= m
->m_pkthdr
.len
;
943 wq
->tx_stats
.tx_pkts
++;
945 bus_dmamap_sync(wq
->ring
->dma
.tag
, wq
->ring
->dma
.map
,
946 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
947 reg_value
= (num_wqes
<< 16) | wq
->wq_id
;
948 OCE_WRITE_REG32(sc
, db
, wq
->db_offset
, reg_value
);
950 } else if (rc
== EFBIG
) {
951 if (retry_cnt
== 0) {
952 m_temp
= m_defrag(m
, M_NOWAIT
);
957 retry_cnt
= retry_cnt
+ 1;
961 } else if (rc
== ENOMEM
)
976 oce_tx_complete(struct oce_wq
*wq
, uint32_t wqe_idx
, uint32_t status
)
978 struct oce_packet_desc
*pd
;
979 POCE_SOFTC sc
= (POCE_SOFTC
) wq
->parent
;
982 pd
= &wq
->pckts
[wq
->pkt_desc_tail
];
983 atomic_store_rel_int(&wq
->pkt_desc_tail
,
984 (wq
->pkt_desc_tail
+ 1) % OCE_WQ_PACKET_ARRAY_SIZE
);
985 atomic_subtract_int(&wq
->ring
->num_used
, pd
->nsegs
+ 1);
986 bus_dmamap_sync(wq
->tag
, pd
->map
, BUS_DMASYNC_POSTWRITE
);
987 bus_dmamap_unload(wq
->tag
, pd
->map
);
993 if (ifq_is_oactive(&sc
->ifp
->if_snd
)) {
994 if (wq
->ring
->num_used
< (wq
->ring
->num_items
/ 2)) {
995 ifq_clr_oactive(&sc
->ifp
->if_snd
);
996 oce_tx_restart(sc
, wq
);
1003 oce_tx_restart(POCE_SOFTC sc
, struct oce_wq
*wq
)
1006 if ((sc
->ifp
->if_flags
& IFF_RUNNING
) != IFF_RUNNING
)
1009 #if 0 /* __FreeBSD_version >= 800000 */
1010 if (!drbr_empty(sc
->ifp
, wq
->br
))
1012 if (!ifq_is_empty(&sc
->ifp
->if_snd
))
1014 taskqueue_enqueue(taskqueue_swi
, &wq
->txtask
);
1019 #if defined(INET6) || defined(INET)
1020 static struct mbuf
*
1021 oce_tso_setup(POCE_SOFTC sc
, struct mbuf
**mpp
)
1028 struct ip6_hdr
*ip6
;
1030 struct ether_vlan_header
*eh
;
1033 int total_len
= 0, ehdrlen
= 0;
1037 if (M_WRITABLE(m
) == 0) {
1038 m
= m_dup(*mpp
, M_NOWAIT
);
1045 eh
= mtod(m
, struct ether_vlan_header
*);
1046 if (eh
->evl_encap_proto
== htons(ETHERTYPE_VLAN
)) {
1047 etype
= ntohs(eh
->evl_proto
);
1048 ehdrlen
= ETHER_HDR_LEN
+ ETHER_VLAN_ENCAP_LEN
;
1050 etype
= ntohs(eh
->evl_encap_proto
);
1051 ehdrlen
= ETHER_HDR_LEN
;
1057 ip
= (struct ip
*)(m
->m_data
+ ehdrlen
);
1058 if (ip
->ip_p
!= IPPROTO_TCP
)
1060 th
= (struct tcphdr
*)((caddr_t
)ip
+ (ip
->ip_hl
<< 2));
1062 total_len
= ehdrlen
+ (ip
->ip_hl
<< 2) + (th
->th_off
<< 2);
1066 case ETHERTYPE_IPV6
:
1067 ip6
= (struct ip6_hdr
*)(m
->m_data
+ ehdrlen
);
1068 if (ip6
->ip6_nxt
!= IPPROTO_TCP
)
1070 th
= (struct tcphdr
*)((caddr_t
)ip6
+ sizeof(struct ip6_hdr
));
1072 total_len
= ehdrlen
+ sizeof(struct ip6_hdr
) + (th
->th_off
<< 2);
1079 m
= m_pullup(m
, total_len
);
1086 #endif /* INET6 || INET */
1089 oce_tx_task(void *arg
, int npending
)
1091 struct oce_wq
*wq
= arg
;
1092 POCE_SOFTC sc
= wq
->parent
;
1093 struct ifnet
*ifp
= sc
->ifp
;
1094 #if 0 /* XXX swildner: MULTIQUEUE */
1098 rc
= oce_multiq_transmit(ifp
, NULL
, wq
);
1100 device_printf(sc
->dev
,
1101 "TX[%d] restart failed\n", wq
->queue_index
);
1103 UNLOCK(&wq
->tx_lock
);
1105 lwkt_serialize_enter(ifp
->if_serializer
);
1106 oce_start_locked(ifp
);
1107 lwkt_serialize_exit(ifp
->if_serializer
);
1113 oce_start_locked(struct ifnet
*ifp
)
1115 POCE_SOFTC sc
= ifp
->if_softc
;
1118 int def_q
= 0; /* Default tx queue is 0 */
1120 if (!((ifp
->if_flags
& IFF_RUNNING
) && !ifq_is_oactive(&ifp
->if_snd
)))
1123 if (!sc
->link_status
) {
1124 ifq_purge(&ifp
->if_snd
);
1129 m
= ifq_dequeue(&sc
->ifp
->if_snd
);
1133 rc
= oce_tx(sc
, &m
, def_q
);
1136 sc
->wq
[def_q
]->tx_stats
.tx_stops
++;
1137 ifq_set_oactive(&ifp
->if_snd
);
1138 ifq_prepend(&ifp
->if_snd
, m
);
1144 ETHER_BPF_MTAP(ifp
, m
);
1152 oce_start(struct ifnet
*ifp
, struct ifaltq_subque
*ifsq
)
1154 ASSERT_ALTQ_SQ_DEFAULT(ifp
, ifsq
);
1155 oce_start_locked(ifp
);
1159 /* Handle the Completion Queue for transmit */
1161 oce_wq_handler(void *arg
)
1163 struct oce_wq
*wq
= (struct oce_wq
*)arg
;
1164 POCE_SOFTC sc
= wq
->parent
;
1165 struct oce_cq
*cq
= wq
->cq
;
1166 struct oce_nic_tx_cqe
*cqe
;
1169 bus_dmamap_sync(cq
->ring
->dma
.tag
,
1170 cq
->ring
->dma
.map
, BUS_DMASYNC_POSTWRITE
);
1171 cqe
= RING_GET_CONSUMER_ITEM_VA(cq
->ring
, struct oce_nic_tx_cqe
);
1172 while (cqe
->u0
.dw
[3]) {
1173 DW_SWAP((uint32_t *) cqe
, sizeof(oce_wq_cqe
));
1175 wq
->ring
->cidx
= cqe
->u0
.s
.wqe_index
+ 1;
1176 if (wq
->ring
->cidx
>= wq
->ring
->num_items
)
1177 wq
->ring
->cidx
-= wq
->ring
->num_items
;
1179 oce_tx_complete(wq
, cqe
->u0
.s
.wqe_index
, cqe
->u0
.s
.status
);
1180 wq
->tx_stats
.tx_compl
++;
1182 RING_GET(cq
->ring
, 1);
1183 bus_dmamap_sync(cq
->ring
->dma
.tag
,
1184 cq
->ring
->dma
.map
, BUS_DMASYNC_POSTWRITE
);
1186 RING_GET_CONSUMER_ITEM_VA(cq
->ring
, struct oce_nic_tx_cqe
);
1191 oce_arm_cq(sc
, cq
->cq_id
, num_cqes
, FALSE
);
1197 #if 0 /* XXX swildner: MULTIQUEUE */
1199 oce_multiq_transmit(struct ifnet
*ifp
, struct mbuf
*m
, struct oce_wq
*wq
)
1201 POCE_SOFTC sc
= ifp
->if_softc
;
1202 int status
= 0, queue_index
= 0;
1203 struct mbuf
*next
= NULL
;
1204 struct buf_ring
*br
= NULL
;
1207 queue_index
= wq
->queue_index
;
1209 if (!((ifp
->if_flags
& IFF_RUNNING
) && !ifq_is_oactive(&ifp
->if_snd
))) {
1211 status
= drbr_enqueue(ifp
, br
, m
);
1216 if ((status
= drbr_enqueue(ifp
, br
, m
)) != 0)
1219 while ((next
= drbr_peek(ifp
, br
)) != NULL
) {
1220 if (oce_tx(sc
, &next
, queue_index
)) {
1222 drbr_advance(ifp
, br
);
1224 drbr_putback(ifp
, br
, next
);
1225 wq
->tx_stats
.tx_stops
++;
1226 ifp_set_oactive(&ifp
->if_snd
);
1227 status
= drbr_enqueue(ifp
, br
, next
);
1231 drbr_advance(ifp
, br
);
1232 ifp
->if_obytes
+= next
->m_pkthdr
.len
;
1233 if (next
->m_flags
& M_MCAST
)
1235 ETHER_BPF_MTAP(ifp
, next
);
1245 /*****************************************************************************
1246 * Receive routines functions *
1247 *****************************************************************************/
1250 oce_rx(struct oce_rq
*rq
, uint32_t rqe_idx
, struct oce_nic_rx_cqe
*cqe
)
1253 struct oce_packet_desc
*pd
;
1254 POCE_SOFTC sc
= (POCE_SOFTC
) rq
->parent
;
1255 int i
, len
, frag_len
;
1256 struct mbuf
*m
= NULL
, *tail
= NULL
;
1259 len
= cqe
->u0
.s
.pkt_size
;
1261 /*partial DMA workaround for Lancer*/
1262 oce_discard_rx_comp(rq
, cqe
);
1266 /* Get vlan_tag value */
1267 if(IS_BE(sc
) || IS_SH(sc
))
1268 vtag
= BSWAP_16(cqe
->u0
.s
.vlan_tag
);
1270 vtag
= cqe
->u0
.s
.vlan_tag
;
1273 for (i
= 0; i
< cqe
->u0
.s
.num_fragments
; i
++) {
1275 if (rq
->packets_out
== rq
->packets_in
) {
1276 device_printf(sc
->dev
,
1277 "RQ transmit descriptor missing\n");
1279 out
= rq
->packets_out
+ 1;
1280 if (out
== OCE_RQ_PACKET_ARRAY_SIZE
)
1282 pd
= &rq
->pckts
[rq
->packets_out
];
1283 rq
->packets_out
= out
;
1285 bus_dmamap_sync(rq
->tag
, pd
->map
, BUS_DMASYNC_POSTWRITE
);
1286 bus_dmamap_unload(rq
->tag
, pd
->map
);
1289 frag_len
= (len
> rq
->cfg
.frag_size
) ? rq
->cfg
.frag_size
: len
;
1290 pd
->mbuf
->m_len
= frag_len
;
1293 /* additional fragments */
1294 tail
->m_next
= pd
->mbuf
;
1297 /* first fragment, fill out much of the packet header */
1298 pd
->mbuf
->m_pkthdr
.len
= len
;
1299 pd
->mbuf
->m_pkthdr
.csum_flags
= 0;
1300 if (IF_CSUM_ENABLED(sc
)) {
1301 if (cqe
->u0
.s
.l4_cksum_pass
) {
1302 pd
->mbuf
->m_pkthdr
.csum_flags
|=
1303 (CSUM_DATA_VALID
| CSUM_PSEUDO_HDR
);
1304 pd
->mbuf
->m_pkthdr
.csum_data
= 0xffff;
1306 if (cqe
->u0
.s
.ip_cksum_pass
) {
1307 if (!cqe
->u0
.s
.ip_ver
) { /* IPV4 */
1308 pd
->mbuf
->m_pkthdr
.csum_flags
|=
1309 (CSUM_IP_CHECKED
|CSUM_IP_VALID
);
1313 m
= tail
= pd
->mbuf
;
1320 if (!oce_cqe_portid_valid(sc
, cqe
)) {
1325 m
->m_pkthdr
.rcvif
= sc
->ifp
;
1326 #if 0 /* __FreeBSD_version >= 800000 */
1327 if (rq
->queue_index
)
1328 m
->m_pkthdr
.flowid
= (rq
->queue_index
- 1);
1330 m
->m_pkthdr
.flowid
= rq
->queue_index
;
1331 m
->m_flags
|= M_FLOWID
;
1333 #if 0 /* XXX swildner: ETHER_VTAG */
1334 /* This determines if the vlan tag is valid */
1335 if (oce_cqe_vtp_valid(sc
, cqe
)) {
1336 if (sc
->function_mode
& FNM_FLEX10_MODE
) {
1337 /* FLEX10. If QnQ is not set, neglect VLAN */
1338 if (cqe
->u0
.s
.qnq
) {
1339 m
->m_pkthdr
.ether_vtag
= vtag
;
1340 m
->m_flags
|= M_VLANTAG
;
1342 } else if (sc
->pvid
!= (vtag
& VLAN_VID_MASK
)) {
1343 /* In UMC mode generally the pvid will be stripped by
1344 hw. But in some cases we have seen it comes
1345 with pvid. So if pvid == vlan, neglect vlan.
1347 m
->m_pkthdr
.ether_vtag
= vtag
;
1348 m
->m_flags
|= M_VLANTAG
;
1353 sc
->ifp
->if_ipackets
++;
1354 #if defined(INET6) || defined(INET)
1355 #if 0 /* XXX swildner: LRO */
1356 /* Try to queue to LRO */
1357 if (IF_LRO_ENABLED(sc
) &&
1358 (cqe
->u0
.s
.ip_cksum_pass
) &&
1359 (cqe
->u0
.s
.l4_cksum_pass
) &&
1360 (!cqe
->u0
.s
.ip_ver
) &&
1361 (rq
->lro
.lro_cnt
!= 0)) {
1363 if (tcp_lro_rx(&rq
->lro
, m
, 0) == 0) {
1364 rq
->lro_pkts_queued
++;
1367 /* If LRO posting fails then try to post to STACK */
1372 sc
->ifp
->if_input(sc
->ifp
, m
, NULL
, -1);
1373 #if defined(INET6) || defined(INET)
1374 #if 0 /* XXX swildner: LRO */
1378 /* Update rx stats per queue */
1379 rq
->rx_stats
.rx_pkts
++;
1380 rq
->rx_stats
.rx_bytes
+= cqe
->u0
.s
.pkt_size
;
1381 rq
->rx_stats
.rx_frags
+= cqe
->u0
.s
.num_fragments
;
1382 if (cqe
->u0
.s
.pkt_type
== OCE_MULTICAST_PACKET
)
1383 rq
->rx_stats
.rx_mcast_pkts
++;
1384 if (cqe
->u0
.s
.pkt_type
== OCE_UNICAST_PACKET
)
1385 rq
->rx_stats
.rx_ucast_pkts
++;
1393 oce_discard_rx_comp(struct oce_rq
*rq
, struct oce_nic_rx_cqe
*cqe
)
1395 uint32_t out
, i
= 0;
1396 struct oce_packet_desc
*pd
;
1397 POCE_SOFTC sc
= (POCE_SOFTC
) rq
->parent
;
1398 int num_frags
= cqe
->u0
.s
.num_fragments
;
1400 for (i
= 0; i
< num_frags
; i
++) {
1401 if (rq
->packets_out
== rq
->packets_in
) {
1402 device_printf(sc
->dev
,
1403 "RQ transmit descriptor missing\n");
1405 out
= rq
->packets_out
+ 1;
1406 if (out
== OCE_RQ_PACKET_ARRAY_SIZE
)
1408 pd
= &rq
->pckts
[rq
->packets_out
];
1409 rq
->packets_out
= out
;
1411 bus_dmamap_sync(rq
->tag
, pd
->map
, BUS_DMASYNC_POSTWRITE
);
1412 bus_dmamap_unload(rq
->tag
, pd
->map
);
1420 #if 0 /* XXX swildner: ETHER_VTAG */
1422 oce_cqe_vtp_valid(POCE_SOFTC sc
, struct oce_nic_rx_cqe
*cqe
)
1424 struct oce_nic_rx_cqe_v1
*cqe_v1
;
1427 if (sc
->be3_native
) {
1428 cqe_v1
= (struct oce_nic_rx_cqe_v1
*)cqe
;
1429 vtp
= cqe_v1
->u0
.s
.vlan_tag_present
;
1431 vtp
= cqe
->u0
.s
.vlan_tag_present
;
1440 oce_cqe_portid_valid(POCE_SOFTC sc
, struct oce_nic_rx_cqe
*cqe
)
1442 struct oce_nic_rx_cqe_v1
*cqe_v1
;
1445 if (sc
->be3_native
&& (IS_BE(sc
) || IS_SH(sc
))) {
1446 cqe_v1
= (struct oce_nic_rx_cqe_v1
*)cqe
;
1447 port_id
= cqe_v1
->u0
.s
.port
;
1448 if (sc
->port_id
!= port_id
)
1451 ;/* For BE3 legacy and Lancer this is dummy */
1458 #if defined(INET6) || defined(INET)
1459 #if 0 /* XXX swildner: LRO */
1461 oce_rx_flush_lro(struct oce_rq
*rq
)
1463 struct lro_ctrl
*lro
= &rq
->lro
;
1464 struct lro_entry
*queued
;
1465 POCE_SOFTC sc
= (POCE_SOFTC
) rq
->parent
;
1467 if (!IF_LRO_ENABLED(sc
))
1470 while ((queued
= SLIST_FIRST(&lro
->lro_active
)) != NULL
) {
1471 SLIST_REMOVE_HEAD(&lro
->lro_active
, next
);
1472 tcp_lro_flush(lro
, queued
);
1474 rq
->lro_pkts_queued
= 0;
1481 oce_init_lro(POCE_SOFTC sc
)
1483 struct lro_ctrl
*lro
= NULL
;
1486 for (i
= 0; i
< sc
->nrqs
; i
++) {
1487 lro
= &sc
->rq
[i
]->lro
;
1488 rc
= tcp_lro_init(lro
);
1490 device_printf(sc
->dev
, "LRO init failed\n");
1501 oce_free_lro(POCE_SOFTC sc
)
1503 struct lro_ctrl
*lro
= NULL
;
1506 for (i
= 0; i
< sc
->nrqs
; i
++) {
1507 lro
= &sc
->rq
[i
]->lro
;
1516 oce_alloc_rx_bufs(struct oce_rq
*rq
, int count
)
1518 POCE_SOFTC sc
= (POCE_SOFTC
) rq
->parent
;
1520 struct oce_packet_desc
*pd
;
1521 bus_dma_segment_t segs
[6];
1522 int nsegs
, added
= 0;
1523 struct oce_nic_rqe
*rqe
;
1524 pd_rxulp_db_t rxdb_reg
;
1526 bzero(&rxdb_reg
, sizeof(pd_rxulp_db_t
));
1527 for (i
= 0; i
< count
; i
++) {
1528 in
= rq
->packets_in
+ 1;
1529 if (in
== OCE_RQ_PACKET_ARRAY_SIZE
)
1531 if (in
== rq
->packets_out
)
1532 break; /* no more room */
1534 pd
= &rq
->pckts
[rq
->packets_in
];
1535 pd
->mbuf
= m_getcl(M_NOWAIT
, MT_DATA
, M_PKTHDR
);
1536 if (pd
->mbuf
== NULL
)
1539 pd
->mbuf
->m_len
= pd
->mbuf
->m_pkthdr
.len
= MCLBYTES
;
1540 rc
= bus_dmamap_load_mbuf_segment(rq
->tag
,
1544 &nsegs
, BUS_DMA_NOWAIT
);
1555 rq
->packets_in
= in
;
1556 bus_dmamap_sync(rq
->tag
, pd
->map
, BUS_DMASYNC_PREREAD
);
1558 rqe
= RING_GET_PRODUCER_ITEM_VA(rq
->ring
, struct oce_nic_rqe
);
1559 rqe
->u0
.s
.frag_pa_hi
= ADDR_HI(segs
[0].ds_addr
);
1560 rqe
->u0
.s
.frag_pa_lo
= ADDR_LO(segs
[0].ds_addr
);
1561 DW_SWAP(u32ptr(rqe
), sizeof(struct oce_nic_rqe
));
1562 RING_PUT(rq
->ring
, 1);
1567 for (i
= added
/ OCE_MAX_RQ_POSTS
; i
> 0; i
--) {
1568 rxdb_reg
.bits
.num_posted
= OCE_MAX_RQ_POSTS
;
1569 rxdb_reg
.bits
.qid
= rq
->rq_id
;
1570 OCE_WRITE_REG32(sc
, db
, PD_RXULP_DB
, rxdb_reg
.dw0
);
1571 added
-= OCE_MAX_RQ_POSTS
;
1574 rxdb_reg
.bits
.qid
= rq
->rq_id
;
1575 rxdb_reg
.bits
.num_posted
= added
;
1576 OCE_WRITE_REG32(sc
, db
, PD_RXULP_DB
, rxdb_reg
.dw0
);
1584 /* Handle the Completion Queue for receive */
1586 oce_rq_handler(void *arg
)
1588 struct oce_rq
*rq
= (struct oce_rq
*)arg
;
1589 struct oce_cq
*cq
= rq
->cq
;
1590 POCE_SOFTC sc
= rq
->parent
;
1591 struct oce_nic_rx_cqe
*cqe
;
1592 int num_cqes
= 0, rq_buffers_used
= 0;
1594 bus_dmamap_sync(cq
->ring
->dma
.tag
,
1595 cq
->ring
->dma
.map
, BUS_DMASYNC_POSTWRITE
);
1596 cqe
= RING_GET_CONSUMER_ITEM_VA(cq
->ring
, struct oce_nic_rx_cqe
);
1597 while (cqe
->u0
.dw
[2]) {
1598 DW_SWAP((uint32_t *) cqe
, sizeof(oce_rq_cqe
));
1600 RING_GET(rq
->ring
, 1);
1601 if (cqe
->u0
.s
.error
== 0) {
1602 oce_rx(rq
, cqe
->u0
.s
.frag_index
, cqe
);
1604 rq
->rx_stats
.rxcp_err
++;
1605 sc
->ifp
->if_ierrors
++;
1606 /* Post L3/L4 errors to stack.*/
1607 oce_rx(rq
, cqe
->u0
.s
.frag_index
, cqe
);
1609 rq
->rx_stats
.rx_compl
++;
1612 #if defined(INET6) || defined(INET)
1613 #if 0 /* XXX swildner: LRO */
1614 if (IF_LRO_ENABLED(sc
) && rq
->lro_pkts_queued
>= 16) {
1615 oce_rx_flush_lro(rq
);
1620 RING_GET(cq
->ring
, 1);
1621 bus_dmamap_sync(cq
->ring
->dma
.tag
,
1622 cq
->ring
->dma
.map
, BUS_DMASYNC_POSTWRITE
);
1624 RING_GET_CONSUMER_ITEM_VA(cq
->ring
, struct oce_nic_rx_cqe
);
1626 if (num_cqes
>= (IS_XE201(sc
) ? 8 : oce_max_rsp_handled
))
1630 #if defined(INET6) || defined(INET)
1631 #if 0 /* XXX swildner: LRO */
1632 if (IF_LRO_ENABLED(sc
))
1633 oce_rx_flush_lro(rq
);
1638 oce_arm_cq(sc
, cq
->cq_id
, num_cqes
, FALSE
);
1639 rq_buffers_used
= OCE_RQ_PACKET_ARRAY_SIZE
- rq
->pending
;
1640 if (rq_buffers_used
> 1)
1641 oce_alloc_rx_bufs(rq
, (rq_buffers_used
- 1));
1651 /*****************************************************************************
1652 * Helper function prototypes in this file *
1653 *****************************************************************************/
1656 oce_attach_ifp(POCE_SOFTC sc
)
1659 sc
->ifp
= if_alloc(IFT_ETHER
);
1663 ifmedia_init(&sc
->media
, IFM_IMASK
, oce_media_change
, oce_media_status
);
1664 ifmedia_add(&sc
->media
, IFM_ETHER
| IFM_AUTO
, 0, NULL
);
1665 ifmedia_set(&sc
->media
, IFM_ETHER
| IFM_AUTO
);
1667 sc
->ifp
->if_flags
= IFF_BROADCAST
| IFF_MULTICAST
;
1668 sc
->ifp
->if_ioctl
= oce_ioctl
;
1669 sc
->ifp
->if_start
= oce_start
;
1670 sc
->ifp
->if_init
= oce_init
;
1671 sc
->ifp
->if_mtu
= ETHERMTU
;
1672 sc
->ifp
->if_softc
= sc
;
1673 #if 0 /* XXX swildner: MULTIQUEUE */
1674 sc
->ifp
->if_transmit
= oce_multiq_start
;
1675 sc
->ifp
->if_qflush
= oce_multiq_flush
;
1678 if_initname(sc
->ifp
,
1679 device_get_name(sc
->dev
), device_get_unit(sc
->dev
));
1681 sc
->ifp
->if_nmbclusters
= sc
->nrqs
* sc
->rq
[0]->cfg
.q_len
;
1683 ifq_set_maxlen(&sc
->ifp
->if_snd
, OCE_MAX_TX_DESC
- 1);
1684 ifq_set_ready(&sc
->ifp
->if_snd
);
1686 sc
->ifp
->if_hwassist
= OCE_IF_HWASSIST
;
1687 sc
->ifp
->if_hwassist
|= CSUM_TSO
;
1688 sc
->ifp
->if_hwassist
|= (CSUM_IP
| CSUM_TCP
| CSUM_UDP
);
1690 sc
->ifp
->if_capabilities
= OCE_IF_CAPABILITIES
;
1691 sc
->ifp
->if_capabilities
|= IFCAP_HWCSUM
;
1692 #if 0 /* XXX swildner: VLAN_HWFILTER */
1693 sc
->ifp
->if_capabilities
|= IFCAP_VLAN_HWFILTER
;
1696 #if defined(INET6) || defined(INET)
1697 sc
->ifp
->if_capabilities
|= IFCAP_TSO
;
1698 #if 0 /* XXX swildner: LRO */
1699 sc
->ifp
->if_capabilities
|= IFCAP_LRO
;
1701 #if 0 /* XXX swildner: VLAN_HWTSO */
1702 sc
->ifp
->if_capabilities
|= IFCAP_VLAN_HWTSO
;
1706 sc
->ifp
->if_capenable
= sc
->ifp
->if_capabilities
;
1707 sc
->ifp
->if_baudrate
= IF_Gbps(10UL);
1709 ether_ifattach(sc
->ifp
, sc
->macaddr
.mac_addr
, NULL
);
1716 oce_add_vlan(void *arg
, struct ifnet
*ifp
, uint16_t vtag
)
1718 POCE_SOFTC sc
= ifp
->if_softc
;
1720 if (ifp
->if_softc
!= arg
)
1722 if ((vtag
== 0) || (vtag
> 4095))
1725 sc
->vlan_tag
[vtag
] = 1;
1732 oce_del_vlan(void *arg
, struct ifnet
*ifp
, uint16_t vtag
)
1734 POCE_SOFTC sc
= ifp
->if_softc
;
1736 if (ifp
->if_softc
!= arg
)
1738 if ((vtag
== 0) || (vtag
> 4095))
1741 sc
->vlan_tag
[vtag
] = 0;
1748 * A max of 64 vlans can be configured in BE. If the user configures
1749 * more, place the card in vlan promiscuous mode.
1752 oce_vid_config(POCE_SOFTC sc
)
1754 #if 0 /* XXX swildner: VLAN_HWFILTER */
1755 struct normal_vlan vtags
[MAX_VLANFILTER_SIZE
];
1756 uint16_t ntags
= 0, i
;
1760 #if 0 /* XXX swildner: VLAN_HWFILTER */
1761 if ((sc
->vlans_added
<= MAX_VLANFILTER_SIZE
) &&
1762 (sc
->ifp
->if_capenable
& IFCAP_VLAN_HWFILTER
)) {
1763 for (i
= 0; i
< MAX_VLANS
; i
++) {
1764 if (sc
->vlan_tag
[i
]) {
1765 vtags
[ntags
].vtag
= i
;
1770 status
= oce_config_vlan(sc
, (uint8_t) sc
->if_id
,
1771 vtags
, ntags
, 1, 0);
1774 status
= oce_config_vlan(sc
, (uint8_t) sc
->if_id
,
1781 oce_mac_addr_set(POCE_SOFTC sc
)
1783 uint32_t old_pmac_id
= sc
->pmac_id
;
1787 status
= bcmp((IF_LLADDR(sc
->ifp
)), sc
->macaddr
.mac_addr
,
1788 sc
->macaddr
.size_of_struct
);
1792 status
= oce_mbox_macaddr_add(sc
, (uint8_t *)(IF_LLADDR(sc
->ifp
)),
1793 sc
->if_id
, &sc
->pmac_id
);
1795 status
= oce_mbox_macaddr_del(sc
, sc
->if_id
, old_pmac_id
);
1796 bcopy((IF_LLADDR(sc
->ifp
)), sc
->macaddr
.mac_addr
,
1797 sc
->macaddr
.size_of_struct
);
1800 device_printf(sc
->dev
, "Failed update macaddress\n");
1806 oce_handle_passthrough(struct ifnet
*ifp
, caddr_t data
)
1808 POCE_SOFTC sc
= ifp
->if_softc
;
1809 struct ifreq
*ifr
= (struct ifreq
*)data
;
1811 char cookie
[32] = {0};
1812 void *priv_data
= (void *)ifr
->ifr_data
;
1816 OCE_DMA_MEM dma_mem
;
1817 struct mbx_common_get_cntl_attr
*fw_cmd
;
1819 if (copyin(priv_data
, cookie
, strlen(IOCTL_COOKIE
)))
1822 if (memcmp(cookie
, IOCTL_COOKIE
, strlen(IOCTL_COOKIE
)))
1825 ioctl_ptr
= (char *)priv_data
+ strlen(IOCTL_COOKIE
);
1826 if (copyin(ioctl_ptr
, &req
, sizeof(struct mbx_hdr
)))
1829 req_size
= le32toh(req
.u0
.req
.request_length
);
1830 if (req_size
> 65536)
1833 req_size
+= sizeof(struct mbx_hdr
);
1834 rc
= oce_dma_alloc(sc
, req_size
, &dma_mem
, 0);
1838 if (copyin(ioctl_ptr
, OCE_DMAPTR(&dma_mem
,char), req_size
)) {
1843 rc
= oce_pass_through_mbox(sc
, &dma_mem
, req_size
);
1849 if (copyout(OCE_DMAPTR(&dma_mem
,char), ioctl_ptr
, req_size
))
1853 firmware is filling all the attributes for this ioctl except
1854 the driver version..so fill it
1856 if(req
.u0
.rsp
.opcode
== OPCODE_COMMON_GET_CNTL_ATTRIBUTES
) {
1857 fw_cmd
= (struct mbx_common_get_cntl_attr
*) ioctl_ptr
;
1858 strncpy(fw_cmd
->params
.rsp
.cntl_attr_info
.hba_attr
.drv_ver_str
,
1859 COMPONENT_REVISION
, strlen(COMPONENT_REVISION
));
1863 oce_dma_free(sc
, &dma_mem
);
1869 oce_eqd_set_periodic(POCE_SOFTC sc
)
1871 struct oce_set_eqd set_eqd
[OCE_MAX_EQ
];
1872 struct oce_aic_obj
*aic
;
1874 uint64_t now
= 0, delta
;
1875 int eqd
, i
, num
= 0;
1879 for (i
= 0 ; i
< sc
->neqs
; i
++) {
1881 aic
= &sc
->aic_obj
[i
];
1882 /* When setting the static eq delay from the user space */
1890 /* Over flow check */
1891 if ((now
< aic
->ticks
) || (eqo
->intr
< aic
->intr_prev
))
1894 delta
= now
- aic
->ticks
;
1897 /* Interrupt rate based on elapsed ticks */
1899 ips
= (uint32_t)(eqo
->intr
- aic
->intr_prev
) / tps
;
1901 if (ips
> INTR_RATE_HWM
)
1902 eqd
= aic
->cur_eqd
+ 20;
1903 else if (ips
< INTR_RATE_LWM
)
1904 eqd
= aic
->cur_eqd
/ 2;
1911 /* Make sure that the eq delay is in the known range */
1912 eqd
= min(eqd
, aic
->max_eqd
);
1913 eqd
= max(eqd
, aic
->min_eqd
);
1916 if (eqd
!= aic
->cur_eqd
) {
1917 set_eqd
[num
].delay_multiplier
= (eqd
* 65)/100;
1918 set_eqd
[num
].eq_id
= eqo
->eq_id
;
1923 aic
->intr_prev
= eqo
->intr
;
1927 /* Is there atleast one eq that needs to be modified? */
1929 oce_mbox_eqd_modify_periodic(sc
, set_eqd
, num
);
1934 oce_local_timer(void *arg
)
1936 POCE_SOFTC sc
= arg
;
1939 lwkt_serialize_enter(sc
->ifp
->if_serializer
);
1940 oce_refresh_nic_stats(sc
);
1941 oce_refresh_queue_stats(sc
);
1942 oce_mac_addr_set(sc
);
1945 for (i
= 0; i
< sc
->nwqs
; i
++)
1946 oce_tx_restart(sc
, sc
->wq
[i
]);
1948 /* calculate and set the eq delay for optimal interrupt rate */
1949 if (IS_BE(sc
) || IS_SH(sc
))
1950 oce_eqd_set_periodic(sc
);
1952 callout_reset(&sc
->timer
, hz
, oce_local_timer
, sc
);
1953 lwkt_serialize_exit(sc
->ifp
->if_serializer
);
1957 /* NOTE : This should only be called holding
1961 oce_if_deactivate(POCE_SOFTC sc
)
1969 sc
->ifp
->if_flags
&= ~IFF_RUNNING
;
1970 ifq_clr_oactive(&sc
->ifp
->if_snd
);
1972 /*Wait for max of 400ms for TX completions to be done */
1973 while (mtime
< 400) {
1975 for_all_wq_queues(sc
, wq
, i
) {
1976 if (wq
->ring
->num_used
) {
1987 /* Stop intrs and finish any bottom halves pending */
1988 oce_hw_intr_disable(sc
);
1990 /* Since taskqueue_drain takes a Gaint Lock, We should not acquire
1991 any other lock. So unlock device lock and require after
1992 completing taskqueue_drain.
1994 UNLOCK(&sc
->dev_lock
);
1995 for (i
= 0; i
< sc
->intr_count
; i
++) {
1996 if (sc
->intrs
[i
].tq
!= NULL
) {
1997 taskqueue_drain(sc
->intrs
[i
].tq
, &sc
->intrs
[i
].task
);
2000 LOCK(&sc
->dev_lock
);
2002 /* Delete RX queue in card with flush param */
2005 /* Invalidate any pending cq and eq entries*/
2006 for_all_evnt_queues(sc
, eq
, i
)
2008 for_all_rq_queues(sc
, rq
, i
)
2009 oce_drain_rq_cq(rq
);
2010 for_all_wq_queues(sc
, wq
, i
)
2011 oce_drain_wq_cq(wq
);
2013 /* But still we need to get MCC aync events.
2014 So enable intrs and also arm first EQ
2016 oce_hw_intr_enable(sc
);
2017 oce_arm_eq(sc
, sc
->eq
[0]->eq_id
, 0, TRUE
, FALSE
);
2024 oce_if_activate(POCE_SOFTC sc
)
2031 sc
->ifp
->if_flags
|= IFF_RUNNING
;
2033 oce_hw_intr_disable(sc
);
2037 for_all_rq_queues(sc
, rq
, i
) {
2038 rc
= oce_start_rq(rq
);
2040 device_printf(sc
->dev
, "Unable to start RX\n");
2043 for_all_wq_queues(sc
, wq
, i
) {
2044 rc
= oce_start_wq(wq
);
2046 device_printf(sc
->dev
, "Unable to start TX\n");
2050 for_all_evnt_queues(sc
, eq
, i
)
2051 oce_arm_eq(sc
, eq
->eq_id
, 0, TRUE
, FALSE
);
2053 oce_hw_intr_enable(sc
);
2058 process_link_state(POCE_SOFTC sc
, struct oce_async_cqe_link_state
*acqe
)
2060 /* Update Link status */
2061 if ((acqe
->u0
.s
.link_status
& ~ASYNC_EVENT_LOGICAL
) ==
2062 ASYNC_EVENT_LINK_UP
) {
2063 sc
->link_status
= ASYNC_EVENT_LINK_UP
;
2064 if_link_state_change(sc
->ifp
);
2066 sc
->link_status
= ASYNC_EVENT_LINK_DOWN
;
2067 if_link_state_change(sc
->ifp
);
2071 sc
->link_speed
= acqe
->u0
.s
.speed
;
2072 sc
->qos_link_speed
= (uint32_t) acqe
->u0
.s
.qos_link_speed
* 10;
2077 /* Handle the Completion Queue for the Mailbox/Async notifications */
2079 oce_mq_handler(void *arg
)
2081 struct oce_mq
*mq
= (struct oce_mq
*)arg
;
2082 POCE_SOFTC sc
= mq
->parent
;
2083 struct oce_cq
*cq
= mq
->cq
;
2084 int num_cqes
= 0, evt_type
= 0, optype
= 0;
2085 struct oce_mq_cqe
*cqe
;
2086 struct oce_async_cqe_link_state
*acqe
;
2087 struct oce_async_event_grp5_pvid_state
*gcqe
;
2088 struct oce_async_event_qnq
*dbgcqe
;
2091 bus_dmamap_sync(cq
->ring
->dma
.tag
,
2092 cq
->ring
->dma
.map
, BUS_DMASYNC_POSTWRITE
);
2093 cqe
= RING_GET_CONSUMER_ITEM_VA(cq
->ring
, struct oce_mq_cqe
);
2095 while (cqe
->u0
.dw
[3]) {
2096 DW_SWAP((uint32_t *) cqe
, sizeof(oce_mq_cqe
));
2097 if (cqe
->u0
.s
.async_event
) {
2098 evt_type
= cqe
->u0
.s
.event_type
;
2099 optype
= cqe
->u0
.s
.async_type
;
2100 if (evt_type
== ASYNC_EVENT_CODE_LINK_STATE
) {
2101 /* Link status evt */
2102 acqe
= (struct oce_async_cqe_link_state
*)cqe
;
2103 process_link_state(sc
, acqe
);
2104 } else if ((evt_type
== ASYNC_EVENT_GRP5
) &&
2105 (optype
== ASYNC_EVENT_PVID_STATE
)) {
2108 (struct oce_async_event_grp5_pvid_state
*)cqe
;
2110 sc
->pvid
= gcqe
->tag
& VLAN_VID_MASK
;
2115 else if(evt_type
== ASYNC_EVENT_CODE_DEBUG
&&
2116 optype
== ASYNC_EVENT_DEBUG_QNQ
) {
2118 (struct oce_async_event_qnq
*)cqe
;
2120 sc
->qnqid
= dbgcqe
->vlan_tag
;
2121 sc
->qnq_debug_event
= TRUE
;
2125 RING_GET(cq
->ring
, 1);
2126 bus_dmamap_sync(cq
->ring
->dma
.tag
,
2127 cq
->ring
->dma
.map
, BUS_DMASYNC_POSTWRITE
);
2128 cqe
= RING_GET_CONSUMER_ITEM_VA(cq
->ring
, struct oce_mq_cqe
);
2133 oce_arm_cq(sc
, cq
->cq_id
, num_cqes
, FALSE
);
2140 setup_max_queues_want(POCE_SOFTC sc
)
2142 /* Check if it is FLEX machine. Is so dont use RSS */
2143 if ((sc
->function_mode
& FNM_FLEX10_MODE
) ||
2144 (sc
->function_mode
& FNM_UMC_MODE
) ||
2145 (sc
->function_mode
& FNM_VNIC_MODE
) ||
2146 (!is_rss_enabled(sc
)) ||
2147 (sc
->flags
& OCE_FLAGS_BE2
)) {
2155 update_queues_got(POCE_SOFTC sc
)
2157 if (is_rss_enabled(sc
)) {
2158 sc
->nrqs
= sc
->intr_count
+ 1;
2159 sc
->nwqs
= sc
->intr_count
;
2167 oce_check_ipv6_ext_hdr(struct mbuf
*m
)
2169 struct ether_header
*eh
= mtod(m
, struct ether_header
*);
2170 caddr_t m_datatemp
= m
->m_data
;
2172 if (eh
->ether_type
== htons(ETHERTYPE_IPV6
)) {
2173 m
->m_data
+= sizeof(struct ether_header
);
2174 struct ip6_hdr
*ip6
= mtod(m
, struct ip6_hdr
*);
2176 if((ip6
->ip6_nxt
!= IPPROTO_TCP
) && \
2177 (ip6
->ip6_nxt
!= IPPROTO_UDP
)){
2178 struct ip6_ext
*ip6e
= NULL
;
2179 m
->m_data
+= sizeof(struct ip6_hdr
);
2181 ip6e
= (struct ip6_ext
*) mtod(m
, struct ip6_ext
*);
2182 if(ip6e
->ip6e_len
== 0xff) {
2183 m
->m_data
= m_datatemp
;
2187 m
->m_data
= m_datatemp
;
2193 is_be3_a1(POCE_SOFTC sc
)
2195 if((sc
->flags
& OCE_FLAGS_BE3
) && ((sc
->asic_revision
& 0xFF) < 2)) {
2201 static struct mbuf
*
2202 oce_insert_vlan_tag(POCE_SOFTC sc
, struct mbuf
*m
, boolean_t
*complete
)
2204 uint16_t vlan_tag
= 0;
2209 #if 0 /* XXX swildner: ETHER_VTAG */
2210 /* Embed vlan tag in the packet if it is not part of it */
2211 if(m
->m_flags
& M_VLANTAG
) {
2212 vlan_tag
= EVL_VLANOFTAG(m
->m_pkthdr
.ether_vtag
);
2213 m
->m_flags
&= ~M_VLANTAG
;
2217 /* if UMC, ignore vlan tag insertion and instead insert pvid */
2220 vlan_tag
= sc
->pvid
;
2224 #if 0 /* XXX swildner: ETHER_VTAG */
2226 m
= ether_vlanencap(m
, vlan_tag
);
2230 m
= ether_vlanencap(m
, sc
->qnqid
);
2238 oce_tx_asic_stall_verify(POCE_SOFTC sc
, struct mbuf
*m
)
2240 if(is_be3_a1(sc
) && IS_QNQ_OR_UMC(sc
) && \
2241 oce_check_ipv6_ext_hdr(m
)) {
2248 oce_get_config(POCE_SOFTC sc
)
2251 uint32_t max_rss
= 0;
2253 if ((IS_BE(sc
) || IS_SH(sc
)) && (!sc
->be3_native
))
2254 max_rss
= OCE_LEGACY_MODE_RSS
;
2256 max_rss
= OCE_MAX_RSS
;
2259 rc
= oce_get_func_config(sc
);
2261 sc
->nwqs
= OCE_MAX_WQ
;
2262 sc
->nrssqs
= max_rss
;
2263 sc
->nrqs
= sc
->nrssqs
+ 1;
2267 rc
= oce_get_profile_config(sc
);
2268 sc
->nrssqs
= max_rss
;
2269 sc
->nrqs
= sc
->nrssqs
+ 1;
2271 sc
->nwqs
= OCE_MAX_WQ
;