bna: Brocade 1860 HW Enablement
drivers/net/ethernet/brocade/bna/bnad.c
1 /*
2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
18 #include <linux/bitops.h>
19 #include <linux/netdevice.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
22 #include <linux/in.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_vlan.h>
25 #include <linux/if_ether.h>
26 #include <linux/ip.h>
27 #include <linux/prefetch.h>
29 #include "bnad.h"
30 #include "bna.h"
31 #include "cna.h"
33 static DEFINE_MUTEX(bnad_fwimg_mutex);
36 * Module params
38 static uint bnad_msix_disable;
39 module_param(bnad_msix_disable, uint, 0444);
40 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
42 static uint bnad_ioc_auto_recover = 1;
43 module_param(bnad_ioc_auto_recover, uint, 0444);
44 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
47 * Global variables
49 u32 bnad_rxqs_per_cq = 2;
51 static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
54 * Local MACROS
56 #define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
58 #define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
60 #define BNAD_GET_MBOX_IRQ(_bnad) \
61 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
62 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
63 ((_bnad)->pcidev->irq))
65 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
66 do { \
67 (_res_info)->res_type = BNA_RES_T_MEM; \
68 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
69 (_res_info)->res_u.mem_info.num = (_num); \
70 (_res_info)->res_u.mem_info.len = \
71 sizeof(struct bnad_unmap_q) + \
72 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
73 } while (0)
75 #define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
78 * Reinitialize completions in CQ, once Rx is taken down
80 static void
81 bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
83 struct bna_cq_entry *cmpl, *next_cmpl;
84 unsigned int wi_range, wis = 0, ccb_prod = 0;
85 int i;
87 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
88 wi_range);
90 for (i = 0; i < ccb->q_depth; i++) {
91 wis++;
92 if (likely(--wi_range))
93 next_cmpl = cmpl + 1;
94 else {
95 BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
96 wis = 0;
97 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
98 next_cmpl, wi_range);
100 cmpl->valid = 0;
101 cmpl = next_cmpl;
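/*
 * Unmaps the skb head and each page fragment recorded in the unmap
 * array starting at @index, clears the stored DMA addresses and
 * returns the next unmap index.
 */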
105 static u32
106 bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
107 u32 index, u32 depth, struct sk_buff *skb, u32 frag)
109 int j;
110 array[index].skb = NULL;
112 dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
113 skb_headlen(skb), DMA_TO_DEVICE);
114 dma_unmap_addr_set(&array[index], dma_addr, 0);
115 BNA_QE_INDX_ADD(index, 1, depth);
117 for (j = 0; j < frag; j++) {
118 dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
119 skb_shinfo(skb)->frags[j].size, DMA_TO_DEVICE);
120 dma_unmap_addr_set(&array[index], dma_addr, 0);
121 BNA_QE_INDX_ADD(index, 1, depth);
124 return index;
128 * Frees all pending Tx Bufs
129 * At this point no activity is expected on the Q,
130 * so DMA unmap & freeing is fine.
132 static void
133 bnad_free_all_txbufs(struct bnad *bnad,
134 struct bna_tcb *tcb)
136 u32 unmap_cons;
137 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
138 struct bnad_skb_unmap *unmap_array;
139 struct sk_buff *skb = NULL;
140 int q;
142 unmap_array = unmap_q->unmap_array;
144 for (q = 0; q < unmap_q->q_depth; q++) {
145 skb = unmap_array[q].skb;
146 if (!skb)
147 continue;
149 unmap_cons = q;
150 unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
151 unmap_cons, unmap_q->q_depth, skb,
152 skb_shinfo(skb)->nr_frags);
154 dev_kfree_skb_any(skb);
158 /* Data Path Handlers */
161 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
162 * Can be called in a) Interrupt context
163 * b) Sending context
164 * c) Tasklet context
166 static u32
167 bnad_free_txbufs(struct bnad *bnad,
168 struct bna_tcb *tcb)
170 u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
171 u16 wis, updated_hw_cons;
172 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
173 struct bnad_skb_unmap *unmap_array;
174 struct sk_buff *skb;
177	 * Just return if TX is stopped. This check is useful when
178	 * bnad_free_txbufs() runs from a tasklet that was scheduled
179	 * before bnad_cb_tx_cleanup() cleared the BNAD_TXQ_TX_STARTED
180	 * bit, but actually executes after that cleanup has already
181	 * run.
183 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
184 return 0;
186 updated_hw_cons = *(tcb->hw_consumer_index);
188 wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
189 updated_hw_cons, tcb->q_depth);
191 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
193 unmap_array = unmap_q->unmap_array;
194 unmap_cons = unmap_q->consumer_index;
196 prefetch(&unmap_array[unmap_cons + 1]);
197 while (wis) {
198 skb = unmap_array[unmap_cons].skb;
200 sent_packets++;
201 sent_bytes += skb->len;
202 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
204 unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
205 unmap_cons, unmap_q->q_depth, skb,
206 skb_shinfo(skb)->nr_frags);
208 dev_kfree_skb_any(skb);
211 /* Update consumer pointers. */
212 tcb->consumer_index = updated_hw_cons;
213 unmap_q->consumer_index = unmap_cons;
215 tcb->txq->tx_packets += sent_packets;
216 tcb->txq->tx_bytes += sent_bytes;
218 return sent_packets;
221 /* Tx Free Tasklet function */
222 /* Frees Tx buffers for all the tcbs in all the Tx objects */
224 * Scheduled from sending context, so that
225 * the fat Tx lock is not held for too long
226 * in the sending context.
228 static void
229 bnad_tx_free_tasklet(unsigned long bnad_ptr)
231 struct bnad *bnad = (struct bnad *)bnad_ptr;
232 struct bna_tcb *tcb;
233 u32 acked = 0;
234 int i, j;
236 for (i = 0; i < bnad->num_tx; i++) {
237 for (j = 0; j < bnad->num_txq_per_tx; j++) {
238 tcb = bnad->tx_info[i].tcb[j];
239 if (!tcb)
240 continue;
241 if (((u16) (*tcb->hw_consumer_index) !=
242 tcb->consumer_index) &&
243 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
244 &tcb->flags))) {
245 acked = bnad_free_txbufs(bnad, tcb);
246 if (likely(test_bit(BNAD_TXQ_TX_STARTED,
247 &tcb->flags)))
248 bna_ib_ack(tcb->i_dbell, acked);
249 smp_mb__before_clear_bit();
250 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
252 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
253 &tcb->flags)))
254 continue;
255 if (netif_queue_stopped(bnad->netdev)) {
256 if (acked && netif_carrier_ok(bnad->netdev) &&
257 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
258 BNAD_NETIF_WAKE_THRESHOLD) {
259 netif_wake_queue(bnad->netdev);
260 /* TODO */
261 /* Counters for individual TxQs? */
262 BNAD_UPDATE_CTR(bnad,
263 netif_queue_wakeup);
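/*
 * Frees completed Tx buffers for a TCB, wakes the netdev queue if
 * enough descriptors have been freed, and acks the interrupt block.
 * Serialized against the free tasklet via the BNAD_TXQ_FREE_SENT bit.
 */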
270 static u32
271 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
273 struct net_device *netdev = bnad->netdev;
274 u32 sent = 0;
276 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
277 return 0;
279 sent = bnad_free_txbufs(bnad, tcb);
280 if (sent) {
281 if (netif_queue_stopped(netdev) &&
282 netif_carrier_ok(netdev) &&
283 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
284 BNAD_NETIF_WAKE_THRESHOLD) {
285 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
286 netif_wake_queue(netdev);
287 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
292 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
293 bna_ib_ack(tcb->i_dbell, sent);
295 smp_mb__before_clear_bit();
296 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
298 return sent;
301 /* MSIX Tx Completion Handler */
302 static irqreturn_t
303 bnad_msix_tx(int irq, void *data)
305 struct bna_tcb *tcb = (struct bna_tcb *)data;
306 struct bnad *bnad = tcb->bnad;
308 bnad_tx(bnad, tcb);
310 return IRQ_HANDLED;
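/* Resets the RCB and its unmap queue producer/consumer indices */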
313 static void
314 bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
316 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
318 rcb->producer_index = 0;
319 rcb->consumer_index = 0;
321 unmap_q->producer_index = 0;
322 unmap_q->consumer_index = 0;
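/*
 * Unmaps and frees every posted Rx buffer in the RCB's unmap queue,
 * then resets the RCB and unmap queue indices.
 */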
325 static void
326 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
328 struct bnad_unmap_q *unmap_q;
329 struct bnad_skb_unmap *unmap_array;
330 struct sk_buff *skb;
331 int unmap_cons;
333 unmap_q = rcb->unmap_q;
334 unmap_array = unmap_q->unmap_array;
335 for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
336 skb = unmap_array[unmap_cons].skb;
337 if (!skb)
338 continue;
339 unmap_array[unmap_cons].skb = NULL;
340 dma_unmap_single(&bnad->pcidev->dev,
341 dma_unmap_addr(&unmap_array[unmap_cons],
342 dma_addr),
343 rcb->rxq->buffer_size,
344 DMA_FROM_DEVICE);
345 dev_kfree_skb(skb);
347 bnad_reset_rcb(bnad, rcb);
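/*
 * Allocates skbs for the free slots in the RCB's unmap queue, DMA-maps
 * them, writes their addresses into the RxQ entries and rings the
 * producer doorbell if the RxQ is started.
 */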
350 static void
351 bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
353 u16 to_alloc, alloced, unmap_prod, wi_range;
354 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
355 struct bnad_skb_unmap *unmap_array;
356 struct bna_rxq_entry *rxent;
357 struct sk_buff *skb;
358 dma_addr_t dma_addr;
360 alloced = 0;
361 to_alloc =
362 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
364 unmap_array = unmap_q->unmap_array;
365 unmap_prod = unmap_q->producer_index;
367 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
369 while (to_alloc--) {
370 if (!wi_range)
371 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
372 wi_range);
373 skb = netdev_alloc_skb_ip_align(bnad->netdev,
374 rcb->rxq->buffer_size);
375 if (unlikely(!skb)) {
376 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
377 rcb->rxq->rxbuf_alloc_failed++;
378 goto finishing;
380 unmap_array[unmap_prod].skb = skb;
381 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
382 rcb->rxq->buffer_size,
383 DMA_FROM_DEVICE);
384 dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
385 dma_addr);
386 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
387 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
389 rxent++;
390 wi_range--;
391 alloced++;
394 finishing:
395 if (likely(alloced)) {
396 unmap_q->producer_index = unmap_prod;
397 rcb->producer_index = unmap_prod;
398 smp_mb();
399 if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
400 bna_rxq_prod_indx_doorbell(rcb);
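/*
 * Refills the RxQ once enough unmap slots are free; the
 * BNAD_RXQ_REFILL bit serializes concurrent refill attempts.
 */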
404 static inline void
405 bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
407 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
409 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
410 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
411 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
412 bnad_alloc_n_post_rxbufs(bnad, rcb);
413 smp_mb__before_clear_bit();
414 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
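/*
 * Processes up to @budget completions from the CQ: unmaps each Rx
 * buffer, checks error/checksum/VLAN flags, hands the skb to GRO or
 * the stack, then acks the doorbell and refills the RxQs.
 */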
418 static u32
419 bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
421 struct bna_cq_entry *cmpl, *next_cmpl;
422 struct bna_rcb *rcb = NULL;
423 unsigned int wi_range, packets = 0, wis = 0;
424 struct bnad_unmap_q *unmap_q;
425 struct bnad_skb_unmap *unmap_array;
426 struct sk_buff *skb;
427 u32 flags, unmap_cons;
428 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
429 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
431 set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
433 if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
434 clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
435 return 0;
438 prefetch(bnad->netdev);
439 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
440 wi_range);
441 BUG_ON(!(wi_range <= ccb->q_depth));
442 while (cmpl->valid && packets < budget) {
443 packets++;
444 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
446 if (bna_is_small_rxq(cmpl->rxq_id))
447 rcb = ccb->rcb[1];
448 else
449 rcb = ccb->rcb[0];
451 unmap_q = rcb->unmap_q;
452 unmap_array = unmap_q->unmap_array;
453 unmap_cons = unmap_q->consumer_index;
455 skb = unmap_array[unmap_cons].skb;
456 BUG_ON(!(skb));
457 unmap_array[unmap_cons].skb = NULL;
458 dma_unmap_single(&bnad->pcidev->dev,
459 dma_unmap_addr(&unmap_array[unmap_cons],
460 dma_addr),
461 rcb->rxq->buffer_size,
462 DMA_FROM_DEVICE);
463 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
465 /* Should be more efficient ? Performance ? */
466 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
468 wis++;
469 if (likely(--wi_range))
470 next_cmpl = cmpl + 1;
471 else {
472 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
473 wis = 0;
474 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
475 next_cmpl, wi_range);
476 BUG_ON(!(wi_range <= ccb->q_depth));
478 prefetch(next_cmpl);
480 flags = ntohl(cmpl->flags);
481 if (unlikely
482 (flags &
483 (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
484 BNA_CQ_EF_TOO_LONG))) {
485 dev_kfree_skb_any(skb);
486 rcb->rxq->rx_packets_with_error++;
487 goto next;
490 skb_put(skb, ntohs(cmpl->length));
491 if (likely
492 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
493 (((flags & BNA_CQ_EF_IPV4) &&
494 (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
495 (flags & BNA_CQ_EF_IPV6)) &&
496 (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
497 (flags & BNA_CQ_EF_L4_CKSUM_OK)))
498 skb->ip_summed = CHECKSUM_UNNECESSARY;
499 else
500 skb_checksum_none_assert(skb);
502 rcb->rxq->rx_packets++;
503 rcb->rxq->rx_bytes += skb->len;
504 skb->protocol = eth_type_trans(skb, bnad->netdev);
506 if (flags & BNA_CQ_EF_VLAN)
507 __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
509 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
510 napi_gro_receive(&rx_ctrl->napi, skb);
511 else {
512 netif_receive_skb(skb);
515 next:
516 cmpl->valid = 0;
517 cmpl = next_cmpl;
520 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
522 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
523 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
525 bnad_refill_rxq(bnad, ccb->rcb[0]);
526 if (ccb->rcb[1])
527 bnad_refill_rxq(bnad, ccb->rcb[1]);
529 clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
531 return packets;
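/* Schedules NAPI polling for the Rx control block of this CCB */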
534 static void
535 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
537 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
538 struct napi_struct *napi = &rx_ctrl->napi;
540 if (likely(napi_schedule_prep(napi))) {
541 __napi_schedule(napi);
542 rx_ctrl->rx_schedule++;
546 /* MSIX Rx Path Handler */
547 static irqreturn_t
548 bnad_msix_rx(int irq, void *data)
550 struct bna_ccb *ccb = (struct bna_ccb *)data;
552 if (ccb) {
553 ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
554 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
557 return IRQ_HANDLED;
560 /* Interrupt handlers */
562 /* Mbox Interrupt Handlers */
563 static irqreturn_t
564 bnad_msix_mbox_handler(int irq, void *data)
566 u32 intr_status;
567 unsigned long flags;
568 struct bnad *bnad = (struct bnad *)data;
570 spin_lock_irqsave(&bnad->bna_lock, flags);
571 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
572 spin_unlock_irqrestore(&bnad->bna_lock, flags);
573 return IRQ_HANDLED;
576 bna_intr_status_get(&bnad->bna, intr_status);
578 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
579 bna_mbox_handler(&bnad->bna, intr_status);
581 spin_unlock_irqrestore(&bnad->bna_lock, flags);
583 return IRQ_HANDLED;
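/*
 * INTx interrupt handler: services mailbox interrupts and, for data
 * interrupts, runs Tx completion processing and schedules NAPI for
 * each Rx path.
 */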
586 static irqreturn_t
587 bnad_isr(int irq, void *data)
589 int i, j;
590 u32 intr_status;
591 unsigned long flags;
592 struct bnad *bnad = (struct bnad *)data;
593 struct bnad_rx_info *rx_info;
594 struct bnad_rx_ctrl *rx_ctrl;
595 struct bna_tcb *tcb = NULL;
597 spin_lock_irqsave(&bnad->bna_lock, flags);
598 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
599 spin_unlock_irqrestore(&bnad->bna_lock, flags);
600 return IRQ_NONE;
603 bna_intr_status_get(&bnad->bna, intr_status);
605 if (unlikely(!intr_status)) {
606 spin_unlock_irqrestore(&bnad->bna_lock, flags);
607 return IRQ_NONE;
610 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
611 bna_mbox_handler(&bnad->bna, intr_status);
613 spin_unlock_irqrestore(&bnad->bna_lock, flags);
615 if (!BNA_IS_INTX_DATA_INTR(intr_status))
616 return IRQ_HANDLED;
618 /* Process data interrupts */
619 /* Tx processing */
620 for (i = 0; i < bnad->num_tx; i++) {
621 for (j = 0; j < bnad->num_txq_per_tx; j++) {
622 tcb = bnad->tx_info[i].tcb[j];
623 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
624 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
627 /* Rx processing */
628 for (i = 0; i < bnad->num_rx; i++) {
629 rx_info = &bnad->rx_info[i];
630 if (!rx_info->rx)
631 continue;
632 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
633 rx_ctrl = &rx_info->rx_ctrl[j];
634 if (rx_ctrl->ccb)
635 bnad_netif_rx_schedule_poll(bnad,
636 rx_ctrl->ccb);
639 return IRQ_HANDLED;
643 * Called in interrupt / callback context
644 * with bna_lock held, so cfg_flags access is OK
646 static void
647 bnad_enable_mbox_irq(struct bnad *bnad)
649 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
651 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
655	 * Called with bnad->bna_lock held because of
656	 * bnad->cfg_flags access.
658 static void
659 bnad_disable_mbox_irq(struct bnad *bnad)
661 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
663 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
666 static void
667 bnad_set_netdev_perm_addr(struct bnad *bnad)
669 struct net_device *netdev = bnad->netdev;
671 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
672 if (is_zero_ether_addr(netdev->dev_addr))
673 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
676 /* Control Path Handlers */
678 /* Callbacks */
679 void
680 bnad_cb_mbox_intr_enable(struct bnad *bnad)
682 bnad_enable_mbox_irq(bnad);
685 void
686 bnad_cb_mbox_intr_disable(struct bnad *bnad)
688 bnad_disable_mbox_irq(bnad);
691 void
692 bnad_cb_ioceth_ready(struct bnad *bnad)
694 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
695 complete(&bnad->bnad_completions.ioc_comp);
698 void
699 bnad_cb_ioceth_failed(struct bnad *bnad)
701 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
702 complete(&bnad->bnad_completions.ioc_comp);
705 void
706 bnad_cb_ioceth_disabled(struct bnad *bnad)
708 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
709 complete(&bnad->bnad_completions.ioc_comp);
712 static void
713 bnad_cb_enet_disabled(void *arg)
715 struct bnad *bnad = (struct bnad *)arg;
717 netif_carrier_off(bnad->netdev);
718 complete(&bnad->bnad_completions.enet_comp);
721 void
722 bnad_cb_ethport_link_status(struct bnad *bnad,
723 enum bna_link_status link_status)
725 bool link_up = 0;
727 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
729 if (link_status == BNA_CEE_UP) {
730 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
731 BNAD_UPDATE_CTR(bnad, cee_toggle);
732 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
733 } else {
734 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
735 BNAD_UPDATE_CTR(bnad, cee_toggle);
736 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
739 if (link_up) {
740 if (!netif_carrier_ok(bnad->netdev)) {
741 uint tx_id, tcb_id;
742 printk(KERN_WARNING "bna: %s link up\n",
743 bnad->netdev->name);
744 netif_carrier_on(bnad->netdev);
745 BNAD_UPDATE_CTR(bnad, link_toggle);
746 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
747 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
748 tcb_id++) {
749 struct bna_tcb *tcb =
750 bnad->tx_info[tx_id].tcb[tcb_id];
751 u32 txq_id;
752 if (!tcb)
753 continue;
755 txq_id = tcb->id;
757 if (test_bit(BNAD_TXQ_TX_STARTED,
758 &tcb->flags)) {
760 * Force an immediate
761 * Transmit Schedule */
762 printk(KERN_INFO "bna: %s %d "
763 "TXQ_STARTED\n",
764 bnad->netdev->name,
765 txq_id);
766 netif_wake_subqueue(
767 bnad->netdev,
768 txq_id);
769 BNAD_UPDATE_CTR(bnad,
770 netif_queue_wakeup);
771 } else {
772 netif_stop_subqueue(
773 bnad->netdev,
774 txq_id);
775 BNAD_UPDATE_CTR(bnad,
776 netif_queue_stop);
781 } else {
782 if (netif_carrier_ok(bnad->netdev)) {
783 printk(KERN_WARNING "bna: %s link down\n",
784 bnad->netdev->name);
785 netif_carrier_off(bnad->netdev);
786 BNAD_UPDATE_CTR(bnad, link_toggle);
791 static void
792 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
794 struct bnad *bnad = (struct bnad *)arg;
796 complete(&bnad->bnad_completions.tx_comp);
799 static void
800 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
802 struct bnad_tx_info *tx_info =
803 (struct bnad_tx_info *)tcb->txq->tx->priv;
804 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
806 tx_info->tcb[tcb->id] = tcb;
807 unmap_q->producer_index = 0;
808 unmap_q->consumer_index = 0;
809 unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
812 static void
813 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
815 struct bnad_tx_info *tx_info =
816 (struct bnad_tx_info *)tcb->txq->tx->priv;
817 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
819 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
820 cpu_relax();
822 bnad_free_all_txbufs(bnad, tcb);
824 unmap_q->producer_index = 0;
825 unmap_q->consumer_index = 0;
827 smp_mb__before_clear_bit();
828 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
830 tx_info->tcb[tcb->id] = NULL;
833 static void
834 bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
836 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
838 unmap_q->producer_index = 0;
839 unmap_q->consumer_index = 0;
840 unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
843 static void
844 bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
846 bnad_free_all_rxbufs(bnad, rcb);
849 static void
850 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
852 struct bnad_rx_info *rx_info =
853 (struct bnad_rx_info *)ccb->cq->rx->priv;
855 rx_info->rx_ctrl[ccb->id].ccb = ccb;
856 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
859 static void
860 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
862 struct bnad_rx_info *rx_info =
863 (struct bnad_rx_info *)ccb->cq->rx->priv;
865 rx_info->rx_ctrl[ccb->id].ccb = NULL;
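/*
 * Called by bna when a Tx is stalled: clears BNAD_TXQ_TX_STARTED and
 * stops the corresponding netdev subqueues.
 */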
868 static void
869 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
871 struct bnad_tx_info *tx_info =
872 (struct bnad_tx_info *)tx->priv;
873 struct bna_tcb *tcb;
874 u32 txq_id;
875 int i;
877 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
878 tcb = tx_info->tcb[i];
879 if (!tcb)
880 continue;
881 txq_id = tcb->id;
882 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
883 netif_stop_subqueue(bnad->netdev, txq_id);
884 printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
885 bnad->netdev->name, txq_id);
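/*
 * Called by bna when Tx resumes: flushes stale Tx buffers, restarts
 * each TxQ and wakes its subqueue if the link is up; also re-reads
 * the permanent MAC address if it is still zero.
 */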
889 static void
890 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
892 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
893 struct bna_tcb *tcb;
894 struct bnad_unmap_q *unmap_q;
895 u32 txq_id;
896 int i;
898 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
899 tcb = tx_info->tcb[i];
900 if (!tcb)
901 continue;
902 txq_id = tcb->id;
904 unmap_q = tcb->unmap_q;
906 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
907 continue;
909 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
910 cpu_relax();
912 bnad_free_all_txbufs(bnad, tcb);
914 unmap_q->producer_index = 0;
915 unmap_q->consumer_index = 0;
917 smp_mb__before_clear_bit();
918 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
920 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
922 if (netif_carrier_ok(bnad->netdev)) {
923 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
924 bnad->netdev->name, txq_id);
925 netif_wake_subqueue(bnad->netdev, txq_id);
926 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
931	 * Workaround: if the first ioceth enable failed we may have
932	 * read a zero MAC address, so try to fetch the MAC address
933	 * again here.
935 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
936 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
937 bnad_set_netdev_perm_addr(bnad);
941 static void
942 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
944 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
945 struct bna_tcb *tcb;
946 int i;
948 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
949 tcb = tx_info->tcb[i];
950 if (!tcb)
951 continue;
954 mdelay(BNAD_TXRX_SYNC_MDELAY);
955 bna_tx_cleanup_complete(tx);
958 static void
959 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
961 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
962 struct bna_ccb *ccb;
963 struct bnad_rx_ctrl *rx_ctrl;
964 int i;
966 mdelay(BNAD_TXRX_SYNC_MDELAY);
968 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
969 rx_ctrl = &rx_info->rx_ctrl[i];
970 ccb = rx_ctrl->ccb;
971 if (!ccb)
972 continue;
974 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
976 if (ccb->rcb[1])
977 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
979 while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
980 cpu_relax();
983 bna_rx_cleanup_complete(rx);
986 static void
987 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
989 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
990 struct bna_ccb *ccb;
991 struct bna_rcb *rcb;
992 struct bnad_rx_ctrl *rx_ctrl;
993 struct bnad_unmap_q *unmap_q;
994 int i;
995 int j;
997 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
998 rx_ctrl = &rx_info->rx_ctrl[i];
999 ccb = rx_ctrl->ccb;
1000 if (!ccb)
1001 continue;
1003 bnad_cq_cmpl_init(bnad, ccb);
1005 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1006 rcb = ccb->rcb[j];
1007 if (!rcb)
1008 continue;
1009 bnad_free_all_rxbufs(bnad, rcb);
1011 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1012 unmap_q = rcb->unmap_q;
1014 /* Now allocate & post buffers for this RCB */
1015 /* !!Allocation in callback context */
1016 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
1017 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
1018 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
1019 bnad_alloc_n_post_rxbufs(bnad, rcb);
1020 smp_mb__before_clear_bit();
1021 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
1027 static void
1028 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1030 struct bnad *bnad = (struct bnad *)arg;
1032 complete(&bnad->bnad_completions.rx_comp);
1035 static void
1036 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1038 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1039 complete(&bnad->bnad_completions.mcast_comp);
1042 void
1043 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1044 struct bna_stats *stats)
1046 if (status == BNA_CB_SUCCESS)
1047 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1049 if (!netif_running(bnad->netdev) ||
1050 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1051 return;
1053 mod_timer(&bnad->stats_timer,
1054 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1057 static void
1058 bnad_cb_enet_mtu_set(struct bnad *bnad)
1060 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1061 complete(&bnad->bnad_completions.mtu_comp);
1064 /* Resource allocation, free functions */
1066 static void
1067 bnad_mem_free(struct bnad *bnad,
1068 struct bna_mem_info *mem_info)
1070 int i;
1071 dma_addr_t dma_pa;
1073 if (mem_info->mdl == NULL)
1074 return;
1076 for (i = 0; i < mem_info->num; i++) {
1077 if (mem_info->mdl[i].kva != NULL) {
1078 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1079 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1080 dma_pa);
1081 dma_free_coherent(&bnad->pcidev->dev,
1082 mem_info->mdl[i].len,
1083 mem_info->mdl[i].kva, dma_pa);
1084 } else
1085 kfree(mem_info->mdl[i].kva);
1088 kfree(mem_info->mdl);
1089 mem_info->mdl = NULL;
1092 static int
1093 bnad_mem_alloc(struct bnad *bnad,
1094 struct bna_mem_info *mem_info)
1096 int i;
1097 dma_addr_t dma_pa;
1099 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1100 mem_info->mdl = NULL;
1101 return 0;
1104 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1105 GFP_KERNEL);
1106 if (mem_info->mdl == NULL)
1107 return -ENOMEM;
1109 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1110 for (i = 0; i < mem_info->num; i++) {
1111 mem_info->mdl[i].len = mem_info->len;
1112 mem_info->mdl[i].kva =
1113 dma_alloc_coherent(&bnad->pcidev->dev,
1114 mem_info->len, &dma_pa,
1115 GFP_KERNEL);
1117 if (mem_info->mdl[i].kva == NULL)
1118 goto err_return;
1120 BNA_SET_DMA_ADDR(dma_pa,
1121 &(mem_info->mdl[i].dma));
1123 } else {
1124 for (i = 0; i < mem_info->num; i++) {
1125 mem_info->mdl[i].len = mem_info->len;
1126 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1127 GFP_KERNEL);
1128 if (mem_info->mdl[i].kva == NULL)
1129 goto err_return;
1133 return 0;
1135 err_return:
1136 bnad_mem_free(bnad, mem_info);
1137 return -ENOMEM;
1140 /* Free IRQ for Mailbox */
1141 static void
1142 bnad_mbox_irq_free(struct bnad *bnad)
1144 int irq;
1145 unsigned long flags;
1147 spin_lock_irqsave(&bnad->bna_lock, flags);
1148 bnad_disable_mbox_irq(bnad);
1149 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1151 irq = BNAD_GET_MBOX_IRQ(bnad);
1152 free_irq(irq, bnad);
1156 * Allocates the IRQ for the mailbox, but keeps it disabled.
1157 * It will be enabled once we get the mbox enable callback
1158 * from bna.
1160 static int
1161 bnad_mbox_irq_alloc(struct bnad *bnad)
1163 int err = 0;
1164 unsigned long irq_flags, flags;
1165 u32 irq;
1166 irq_handler_t irq_handler;
1168 spin_lock_irqsave(&bnad->bna_lock, flags);
1169 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1170 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1171 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1172 irq_flags = 0;
1173 } else {
1174 irq_handler = (irq_handler_t)bnad_isr;
1175 irq = bnad->pcidev->irq;
1176 irq_flags = IRQF_SHARED;
1179 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1180 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1183	 * Set the Mbox IRQ disable flag, so that the IRQ handler
1184	 * called from request_irq() for SHARED IRQs does not execute
1186 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1188 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1190 err = request_irq(irq, irq_handler, irq_flags,
1191 bnad->mbox_irq_name, bnad);
1193 return err;
1196 static void
1197 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1199 kfree(intr_info->idl);
1200 intr_info->idl = NULL;
1203 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1204 static int
1205 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1206 u32 txrx_id, struct bna_intr_info *intr_info)
1208 int i, vector_start = 0;
1209 u32 cfg_flags;
1210 unsigned long flags;
1212 spin_lock_irqsave(&bnad->bna_lock, flags);
1213 cfg_flags = bnad->cfg_flags;
1214 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1216 if (cfg_flags & BNAD_CF_MSIX) {
1217 intr_info->intr_type = BNA_INTR_T_MSIX;
1218 intr_info->idl = kcalloc(intr_info->num,
1219 sizeof(struct bna_intr_descr),
1220 GFP_KERNEL);
1221 if (!intr_info->idl)
1222 return -ENOMEM;
1224 switch (src) {
1225 case BNAD_INTR_TX:
1226 vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1227 break;
1229 case BNAD_INTR_RX:
1230 vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1231 (bnad->num_tx * bnad->num_txq_per_tx) +
1232 txrx_id;
1233 break;
1235 default:
1236 BUG();
1239 for (i = 0; i < intr_info->num; i++)
1240 intr_info->idl[i].vector = vector_start + i;
1241 } else {
1242 intr_info->intr_type = BNA_INTR_T_INTX;
1243 intr_info->num = 1;
1244 intr_info->idl = kcalloc(intr_info->num,
1245 sizeof(struct bna_intr_descr),
1246 GFP_KERNEL);
1247 if (!intr_info->idl)
1248 return -ENOMEM;
1250 switch (src) {
1251 case BNAD_INTR_TX:
1252 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1253 break;
1255 case BNAD_INTR_RX:
1256 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1257 break;
1260 return 0;
1264 * NOTE: Should be called for MSIX only
1265 * Unregisters Tx MSIX vector(s) from the kernel
1267 static void
1268 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1269 int num_txqs)
1271 int i;
1272 int vector_num;
1274 for (i = 0; i < num_txqs; i++) {
1275 if (tx_info->tcb[i] == NULL)
1276 continue;
1278 vector_num = tx_info->tcb[i]->intr_vector;
1279 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1284 * NOTE: Should be called for MSIX only
1285 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1287 static int
1288 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1289 u32 tx_id, int num_txqs)
1291 int i;
1292 int err;
1293 int vector_num;
1295 for (i = 0; i < num_txqs; i++) {
1296 vector_num = tx_info->tcb[i]->intr_vector;
1297 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1298 tx_id + tx_info->tcb[i]->id);
1299 err = request_irq(bnad->msix_table[vector_num].vector,
1300 (irq_handler_t)bnad_msix_tx, 0,
1301 tx_info->tcb[i]->name,
1302 tx_info->tcb[i]);
1303 if (err)
1304 goto err_return;
1307 return 0;
1309 err_return:
1310 if (i > 0)
1311 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1312 return -1;
1316 * NOTE: Should be called for MSIX only
1317 * Unregisters Rx MSIX vector(s) from the kernel
1319 static void
1320 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1321 int num_rxps)
1323 int i;
1324 int vector_num;
1326 for (i = 0; i < num_rxps; i++) {
1327 if (rx_info->rx_ctrl[i].ccb == NULL)
1328 continue;
1330 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1331 free_irq(bnad->msix_table[vector_num].vector,
1332 rx_info->rx_ctrl[i].ccb);
1337 * NOTE: Should be called for MSIX only
1338 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1340 static int
1341 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1342 u32 rx_id, int num_rxps)
1344 int i;
1345 int err;
1346 int vector_num;
1348 for (i = 0; i < num_rxps; i++) {
1349 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1350 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1351 bnad->netdev->name,
1352 rx_id + rx_info->rx_ctrl[i].ccb->id);
1353 err = request_irq(bnad->msix_table[vector_num].vector,
1354 (irq_handler_t)bnad_msix_rx, 0,
1355 rx_info->rx_ctrl[i].ccb->name,
1356 rx_info->rx_ctrl[i].ccb);
1357 if (err)
1358 goto err_return;
1361 return 0;
1363 err_return:
1364 if (i > 0)
1365 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1366 return -1;
1369 /* Free Tx object Resources */
1370 static void
1371 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1373 int i;
1375 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1376 if (res_info[i].res_type == BNA_RES_T_MEM)
1377 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1378 else if (res_info[i].res_type == BNA_RES_T_INTR)
1379 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1383 /* Allocates memory and interrupt resources for Tx object */
1384 static int
1385 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1386 u32 tx_id)
1388 int i, err = 0;
1390 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1391 if (res_info[i].res_type == BNA_RES_T_MEM)
1392 err = bnad_mem_alloc(bnad,
1393 &res_info[i].res_u.mem_info);
1394 else if (res_info[i].res_type == BNA_RES_T_INTR)
1395 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1396 &res_info[i].res_u.intr_info);
1397 if (err)
1398 goto err_return;
1400 return 0;
1402 err_return:
1403 bnad_tx_res_free(bnad, res_info);
1404 return err;
1407 /* Free Rx object Resources */
1408 static void
1409 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1411 int i;
1413 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1414 if (res_info[i].res_type == BNA_RES_T_MEM)
1415 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1416 else if (res_info[i].res_type == BNA_RES_T_INTR)
1417 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1421 /* Allocates memory and interrupt resources for Rx object */
1422 static int
1423 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1424 uint rx_id)
1426 int i, err = 0;
1428 /* All memory needs to be allocated before setup_ccbs */
1429 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1430 if (res_info[i].res_type == BNA_RES_T_MEM)
1431 err = bnad_mem_alloc(bnad,
1432 &res_info[i].res_u.mem_info);
1433 else if (res_info[i].res_type == BNA_RES_T_INTR)
1434 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1435 &res_info[i].res_u.intr_info);
1436 if (err)
1437 goto err_return;
1439 return 0;
1441 err_return:
1442 bnad_rx_res_free(bnad, res_info);
1443 return err;
1446 /* Timer callbacks */
1447 /* a) IOC timer */
1448 static void
1449 bnad_ioc_timeout(unsigned long data)
1451 struct bnad *bnad = (struct bnad *)data;
1452 unsigned long flags;
1454 spin_lock_irqsave(&bnad->bna_lock, flags);
1455 bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1456 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1459 static void
1460 bnad_ioc_hb_check(unsigned long data)
1462 struct bnad *bnad = (struct bnad *)data;
1463 unsigned long flags;
1465 spin_lock_irqsave(&bnad->bna_lock, flags);
1466 bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1467 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1470 static void
1471 bnad_iocpf_timeout(unsigned long data)
1473 struct bnad *bnad = (struct bnad *)data;
1474 unsigned long flags;
1476 spin_lock_irqsave(&bnad->bna_lock, flags);
1477 bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1478 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1481 static void
1482 bnad_iocpf_sem_timeout(unsigned long data)
1484 struct bnad *bnad = (struct bnad *)data;
1485 unsigned long flags;
1487 spin_lock_irqsave(&bnad->bna_lock, flags);
1488 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1489 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1493 * All timer routines use bnad->bna_lock to protect against
1494 * the following race, which may occur in case of no locking:
1495 *	Time	CPU m		CPU n
1496 *	0	1 = test_bit
1497 *	1			clear_bit
1498 *	2			del_timer_sync
1499 *	3	mod_timer
1502 /* b) Dynamic Interrupt Moderation Timer */
1503 static void
1504 bnad_dim_timeout(unsigned long data)
1506 struct bnad *bnad = (struct bnad *)data;
1507 struct bnad_rx_info *rx_info;
1508 struct bnad_rx_ctrl *rx_ctrl;
1509 int i, j;
1510 unsigned long flags;
1512 if (!netif_carrier_ok(bnad->netdev))
1513 return;
1515 spin_lock_irqsave(&bnad->bna_lock, flags);
1516 for (i = 0; i < bnad->num_rx; i++) {
1517 rx_info = &bnad->rx_info[i];
1518 if (!rx_info->rx)
1519 continue;
1520 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1521 rx_ctrl = &rx_info->rx_ctrl[j];
1522 if (!rx_ctrl->ccb)
1523 continue;
1524 bna_rx_dim_update(rx_ctrl->ccb);
1528 /* Check for BNAD_RF_DIM_TIMER_RUNNING; this does not eliminate the race */
1529 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1530 mod_timer(&bnad->dim_timer,
1531 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1532 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1535 /* c) Statistics Timer */
1536 static void
1537 bnad_stats_timeout(unsigned long data)
1539 struct bnad *bnad = (struct bnad *)data;
1540 unsigned long flags;
1542 if (!netif_running(bnad->netdev) ||
1543 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1544 return;
1546 spin_lock_irqsave(&bnad->bna_lock, flags);
1547 bna_hw_stats_get(&bnad->bna);
1548 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1552 * Set up timer for DIM
1553 * Called with bnad->bna_lock held
1555 void
1556 bnad_dim_timer_start(struct bnad *bnad)
1558 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1559 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1560 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1561 (unsigned long)bnad);
1562 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1563 mod_timer(&bnad->dim_timer,
1564 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1569 * Set up timer for statistics
1570 * Called with mutex_lock(&bnad->conf_mutex) held
1572 static void
1573 bnad_stats_timer_start(struct bnad *bnad)
1575 unsigned long flags;
1577 spin_lock_irqsave(&bnad->bna_lock, flags);
1578 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1579 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1580 (unsigned long)bnad);
1581 mod_timer(&bnad->stats_timer,
1582 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1584 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1588 * Stops the stats timer
1589 * Called with mutex_lock(&bnad->conf_mutex) held
1591 static void
1592 bnad_stats_timer_stop(struct bnad *bnad)
1594 int to_del = 0;
1595 unsigned long flags;
1597 spin_lock_irqsave(&bnad->bna_lock, flags);
1598 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1599 to_del = 1;
1600 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1601 if (to_del)
1602 del_timer_sync(&bnad->stats_timer);
1605 /* Utilities */
1607 static void
1608 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1610 int i = 1; /* Index 0 has broadcast address */
1611 struct netdev_hw_addr *mc_addr;
1613 netdev_for_each_mc_addr(mc_addr, netdev) {
1614 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1615 ETH_ALEN);
1616 i++;
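/*
 * NAPI poll callback: processes up to @budget Rx completions and
 * re-enables the Rx interrupt once the CQ has been drained.
 */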
1620 static int
1621 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1623 struct bnad_rx_ctrl *rx_ctrl =
1624 container_of(napi, struct bnad_rx_ctrl, napi);
1625 struct bnad *bnad = rx_ctrl->bnad;
1626 int rcvd = 0;
1628 rx_ctrl->rx_poll_ctr++;
1630 if (!netif_carrier_ok(bnad->netdev))
1631 goto poll_exit;
1633 rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
1634 if (rcvd >= budget)
1635 return rcvd;
1637 poll_exit:
1638 napi_complete(napi);
1640 rx_ctrl->rx_complete++;
1642 if (rx_ctrl->ccb)
1643 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1645 return rcvd;
1648 #define BNAD_NAPI_POLL_QUOTA 64
1649 static void
1650 bnad_napi_init(struct bnad *bnad, u32 rx_id)
1652 struct bnad_rx_ctrl *rx_ctrl;
1653 int i;
1655 /* Initialize NAPI */
1656 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1657 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1658 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1659 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1663 static void
1664 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1666 struct bnad_rx_ctrl *rx_ctrl;
1667 int i;
1669 /* Enable NAPI */
1670 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1671 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1673 napi_enable(&rx_ctrl->napi);
1677 static void
1678 bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1680 int i;
1682 /* First disable and then clean up */
1683 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1684 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1685 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1689 /* Should be called with conf_lock held */
1690 void
1691 bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
1693 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1694 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1695 unsigned long flags;
1697 if (!tx_info->tx)
1698 return;
1700 init_completion(&bnad->bnad_completions.tx_comp);
1701 spin_lock_irqsave(&bnad->bna_lock, flags);
1702 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1703 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1704 wait_for_completion(&bnad->bnad_completions.tx_comp);
1706 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1707 bnad_tx_msix_unregister(bnad, tx_info,
1708 bnad->num_txq_per_tx);
1710 if (0 == tx_id)
1711 tasklet_kill(&bnad->tx_free_tasklet);
1713 spin_lock_irqsave(&bnad->bna_lock, flags);
1714 bna_tx_destroy(tx_info->tx);
1715 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1717 tx_info->tx = NULL;
1718 tx_info->tx_id = 0;
1720 bnad_tx_res_free(bnad, res_info);
1723 /* Should be called with conf_lock held */
1725 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1727 int err;
1728 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1729 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1730 struct bna_intr_info *intr_info =
1731 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1732 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1733 static const struct bna_tx_event_cbfn tx_cbfn = {
1734 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1735 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1736 .tx_stall_cbfn = bnad_cb_tx_stall,
1737 .tx_resume_cbfn = bnad_cb_tx_resume,
1738 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1741 struct bna_tx *tx;
1742 unsigned long flags;
1744 tx_info->tx_id = tx_id;
1746 /* Initialize the Tx object configuration */
1747 tx_config->num_txq = bnad->num_txq_per_tx;
1748 tx_config->txq_depth = bnad->txq_depth;
1749 tx_config->tx_type = BNA_TX_T_REGULAR;
1750 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1752 /* Get BNA's resource requirement for one tx object */
1753 spin_lock_irqsave(&bnad->bna_lock, flags);
1754 bna_tx_res_req(bnad->num_txq_per_tx,
1755 bnad->txq_depth, res_info);
1756 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1758 /* Fill Unmap Q memory requirements */
1759 BNAD_FILL_UNMAPQ_MEM_REQ(
1760 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1761 bnad->num_txq_per_tx,
1762 BNAD_TX_UNMAPQ_DEPTH);
1764 /* Allocate resources */
1765 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1766 if (err)
1767 return err;
1769 /* Ask BNA to create one Tx object, supplying required resources */
1770 spin_lock_irqsave(&bnad->bna_lock, flags);
1771 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1772 tx_info);
1773 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1774 if (!tx)
1775 goto err_return;
1776 tx_info->tx = tx;
1778 /* Register ISR for the Tx object */
1779 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1780 err = bnad_tx_msix_register(bnad, tx_info,
1781 tx_id, bnad->num_txq_per_tx);
1782 if (err)
1783 goto err_return;
1786 spin_lock_irqsave(&bnad->bna_lock, flags);
1787 bna_tx_enable(tx);
1788 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1790 return 0;
1792 err_return:
1793 bnad_tx_res_free(bnad, res_info);
1794 return err;
1797 /* Setup the rx config for bna_rx_create */
1798 /* bnad decides the configuration */
1799 static void
1800 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1802 rx_config->rx_type = BNA_RX_T_REGULAR;
1803 rx_config->num_paths = bnad->num_rxp_per_rx;
1804 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
1806 if (bnad->num_rxp_per_rx > 1) {
1807 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1808 rx_config->rss_config.hash_type =
1809 (BFI_ENET_RSS_IPV6 |
1810 BFI_ENET_RSS_IPV6_TCP |
1811 BFI_ENET_RSS_IPV4 |
1812 BFI_ENET_RSS_IPV4_TCP);
1813 rx_config->rss_config.hash_mask =
1814 bnad->num_rxp_per_rx - 1;
1815 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1816 sizeof(rx_config->rss_config.toeplitz_hash_key));
1817 } else {
1818 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1819 memset(&rx_config->rss_config, 0,
1820 sizeof(rx_config->rss_config));
1822 rx_config->rxp_type = BNA_RXP_SLR;
1823 rx_config->q_depth = bnad->rxq_depth;
1825 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1827 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1830 static void
1831 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1833 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1834 int i;
1836 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1837 rx_info->rx_ctrl[i].bnad = bnad;
1840 /* Called with mutex_lock(&bnad->conf_mutex) held */
1841 void
1842 bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
1844 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1845 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1846 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1847 unsigned long flags;
1848 int to_del = 0;
1850 if (!rx_info->rx)
1851 return;
1853 if (0 == rx_id) {
1854 spin_lock_irqsave(&bnad->bna_lock, flags);
1855 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1856 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1857 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1858 to_del = 1;
1860 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1861 if (to_del)
1862 del_timer_sync(&bnad->dim_timer);
1865 init_completion(&bnad->bnad_completions.rx_comp);
1866 spin_lock_irqsave(&bnad->bna_lock, flags);
1867 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1868 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1869 wait_for_completion(&bnad->bnad_completions.rx_comp);
1871 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1872 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1874 bnad_napi_disable(bnad, rx_id);
1876 spin_lock_irqsave(&bnad->bna_lock, flags);
1877 bna_rx_destroy(rx_info->rx);
1879 rx_info->rx = NULL;
1880 rx_info->rx_id = 0;
1881 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1883 bnad_rx_res_free(bnad, res_info);
1886 /* Called with mutex_lock(&bnad->conf_mutex) held */
1888 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1890 int err;
1891 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1892 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1893 struct bna_intr_info *intr_info =
1894 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1895 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1896 static const struct bna_rx_event_cbfn rx_cbfn = {
1897 .rcb_setup_cbfn = bnad_cb_rcb_setup,
1898 .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
1899 .ccb_setup_cbfn = bnad_cb_ccb_setup,
1900 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
1901 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
1902 .rx_post_cbfn = bnad_cb_rx_post,
1904 struct bna_rx *rx;
1905 unsigned long flags;
1907 rx_info->rx_id = rx_id;
1909 /* Initialize the Rx object configuration */
1910 bnad_init_rx_config(bnad, rx_config);
1912 /* Get BNA's resource requirement for one Rx object */
1913 spin_lock_irqsave(&bnad->bna_lock, flags);
1914 bna_rx_res_req(rx_config, res_info);
1915 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1917 /* Fill Unmap Q memory requirements */
1918 BNAD_FILL_UNMAPQ_MEM_REQ(
1919 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1920 rx_config->num_paths +
1921 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1922 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1924 /* Allocate resource */
1925 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1926 if (err)
1927 return err;
1929 bnad_rx_ctrl_init(bnad, rx_id);
1931 /* Ask BNA to create one Rx object, supplying required resources */
1932 spin_lock_irqsave(&bnad->bna_lock, flags);
1933 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1934 rx_info);
1935 if (!rx) {
1936 err = -ENOMEM;
1937 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1938 goto err_return;
1940 rx_info->rx = rx;
1941 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1944	 * Init NAPI, so that state is set to NAPI_STATE_SCHED and
1945	 * the IRQ handler cannot schedule NAPI at this point.
1947 bnad_napi_init(bnad, rx_id);
1949 /* Register ISR for the Rx object */
1950 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1951 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1952 rx_config->num_paths);
1953 if (err)
1954 goto err_return;
1957 spin_lock_irqsave(&bnad->bna_lock, flags);
1958 if (0 == rx_id) {
1959 /* Set up Dynamic Interrupt Moderation Vector */
1960 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1961 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1963 /* Enable VLAN filtering only on the default Rx */
1964 bna_rx_vlanfilter_enable(rx);
1966 /* Start the DIM timer */
1967 bnad_dim_timer_start(bnad);
1970 bna_rx_enable(rx);
1971 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1973 /* Enable scheduling of NAPI */
1974 bnad_napi_enable(bnad, rx_id);
1976 return 0;
1978 err_return:
1979 bnad_cleanup_rx(bnad, rx_id);
1980 return err;
1983 /* Called with conf_lock & bnad->bna_lock held */
1984 void
1985 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1987 struct bnad_tx_info *tx_info;
1989 tx_info = &bnad->tx_info[0];
1990 if (!tx_info->tx)
1991 return;
1993 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1996 /* Called with conf_lock & bnad->bna_lock held */
1997 void
1998 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2000 struct bnad_rx_info *rx_info;
2001 int i;
2003 for (i = 0; i < bnad->num_rx; i++) {
2004 rx_info = &bnad->rx_info[i];
2005 if (!rx_info->rx)
2006 continue;
2007 bna_rx_coalescing_timeo_set(rx_info->rx,
2008 bnad->rx_coalescing_timeo);
2013 * Called with bnad->bna_lock held
2016 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2018 int ret;
2020 if (!is_valid_ether_addr(mac_addr))
2021 return -EADDRNOTAVAIL;
2023 /* If datapath is down, pretend everything went through */
2024 if (!bnad->rx_info[0].rx)
2025 return 0;
2027 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2028 if (ret != BNA_CB_SUCCESS)
2029 return -EADDRNOTAVAIL;
2031 return 0;
2034 /* Should be called with conf_lock held */
2036 bnad_enable_default_bcast(struct bnad *bnad)
2038 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2039 int ret;
2040 unsigned long flags;
2042 init_completion(&bnad->bnad_completions.mcast_comp);
2044 spin_lock_irqsave(&bnad->bna_lock, flags);
2045 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2046 bnad_cb_rx_mcast_add);
2047 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2049 if (ret == BNA_CB_SUCCESS)
2050 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2051 else
2052 return -ENODEV;
2054 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2055 return -ENODEV;
2057 return 0;
2060 /* Called with mutex_lock(&bnad->conf_mutex) held */
2061 void
2062 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2064 u16 vid;
2065 unsigned long flags;
2067 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2068 spin_lock_irqsave(&bnad->bna_lock, flags);
2069 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2070 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2074 /* Statistics utilities */
2075 void
2076 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2078 int i, j;
2080 for (i = 0; i < bnad->num_rx; i++) {
2081 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2082 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2083 stats->rx_packets += bnad->rx_info[i].
2084 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2085 stats->rx_bytes += bnad->rx_info[i].
2086 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2087 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2088 bnad->rx_info[i].rx_ctrl[j].ccb->
2089 rcb[1]->rxq) {
2090 stats->rx_packets +=
2091 bnad->rx_info[i].rx_ctrl[j].
2092 ccb->rcb[1]->rxq->rx_packets;
2093 stats->rx_bytes +=
2094 bnad->rx_info[i].rx_ctrl[j].
2095 ccb->rcb[1]->rxq->rx_bytes;
2100 for (i = 0; i < bnad->num_tx; i++) {
2101 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2102 if (bnad->tx_info[i].tcb[j]) {
2103 stats->tx_packets +=
2104 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2105 stats->tx_bytes +=
2106 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2113 * Must be called with the bna_lock held.
2115 void
2116 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2118 struct bfi_enet_stats_mac *mac_stats;
2119 u32 bmap;
2120 int i;
2122 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2123 stats->rx_errors =
2124 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2125 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2126 mac_stats->rx_undersize;
2127 stats->tx_errors = mac_stats->tx_fcs_error +
2128 mac_stats->tx_undersize;
2129 stats->rx_dropped = mac_stats->rx_drop;
2130 stats->tx_dropped = mac_stats->tx_drop;
2131 stats->multicast = mac_stats->rx_multicast;
2132 stats->collisions = mac_stats->tx_total_collision;
2134 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2136 /* receive ring buffer overflow ?? */
2138 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2139 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2140 /* recv'r fifo overrun */
2141 bmap = bna_rx_rid_mask(&bnad->bna);
2142 for (i = 0; bmap; i++) {
2143 if (bmap & 1) {
2144 stats->rx_fifo_errors +=
2145 bnad->stats.bna_stats->
2146 hw_stats.rxf_stats[i].frame_drops;
2147 break;
2149 bmap >>= 1;
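/* Waits for any in-flight mailbox IRQ handler on the current vector */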
2153 static void
2154 bnad_mbox_irq_sync(struct bnad *bnad)
2156 u32 irq;
2157 unsigned long flags;
2159 spin_lock_irqsave(&bnad->bna_lock, flags);
2160 if (bnad->cfg_flags & BNAD_CF_MSIX)
2161 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2162 else
2163 irq = bnad->pcidev->irq;
2164 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2166 synchronize_irq(irq);
2169 /* Utility used by bnad_start_xmit, for doing TSO */
2170 static int
2171 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2173 int err;
2175 if (skb_header_cloned(skb)) {
2176 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2177 if (err) {
2178 BNAD_UPDATE_CTR(bnad, tso_err);
2179 return err;
2184 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2185 * excluding the length field.
2187 if (skb->protocol == htons(ETH_P_IP)) {
2188 struct iphdr *iph = ip_hdr(skb);
2190 /* Do we really need these? */
2191 iph->tot_len = 0;
2192 iph->check = 0;
2194 tcp_hdr(skb)->check =
2195 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2196 IPPROTO_TCP, 0);
2197 BNAD_UPDATE_CTR(bnad, tso4);
2198 } else {
2199 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2201 ipv6h->payload_len = 0;
2202 tcp_hdr(skb)->check =
2203 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2204 IPPROTO_TCP, 0);
2205 BNAD_UPDATE_CTR(bnad, tso6);
2208 return 0;
2212 * Initialize Q numbers depending on Rx Paths
2213 * Called with bnad->bna_lock held, because of cfg_flags
2214 * access.
2216 static void
2217 bnad_q_num_init(struct bnad *bnad)
2219 int rxps;
2221 rxps = min((uint)num_online_cpus(),
2222 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2224 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2225 rxps = 1; /* INTx */
2227 bnad->num_rx = 1;
2228 bnad->num_tx = 1;
2229 bnad->num_rxp_per_rx = rxps;
2230 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
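/*
 * Worked example (illustrative): on a 4-CPU system running in MSI-X mode,
 * and assuming BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX is at least 4, the code
 * above yields num_rxp_per_rx = 4; in INTx mode it is forced down to 1.
 */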
2234 * Adjusts the Q numbers, given a number of MSI-X vectors.
2235 * Preference is given to RSS (Rx paths) over Tx priority queues;
2236 * in that case only one Tx queue is used.
2237 * Called with bnad->bna_lock held because of cfg_flags access.
2239 static void
2240 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2242 bnad->num_txq_per_tx = 1;
2243 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2244 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2245 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2246 bnad->num_rxp_per_rx = msix_vectors -
2247 (bnad->num_tx * bnad->num_txq_per_tx) -
2248 BNAD_MAILBOX_MSIX_VECTORS;
2249 } else
2250 bnad->num_rxp_per_rx = 1;
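/*
 * Worked example (illustrative, assuming BNAD_MAILBOX_MSIX_VECTORS == 1
 * and bnad_rxqs_per_cq == 2): with 10 MSI-X vectors and a single TxQ,
 * 10 >= 1 + 2 + 1 holds, so num_rxp_per_rx = 10 - 1 - 1 = 8; with too few
 * vectors the code falls back to a single Rx path.
 */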
2253 /* Enable / disable ioceth */
2254 static int
2255 bnad_ioceth_disable(struct bnad *bnad)
2257 unsigned long flags;
2258 int err = 0;
2260 spin_lock_irqsave(&bnad->bna_lock, flags);
2261 init_completion(&bnad->bnad_completions.ioc_comp);
2262 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2263 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2265 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2266 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2268 err = bnad->bnad_completions.ioc_comp_status;
2269 return err;
2272 static int
2273 bnad_ioceth_enable(struct bnad *bnad)
2275 int err = 0;
2276 unsigned long flags;
2278 spin_lock_irqsave(&bnad->bna_lock, flags);
2279 init_completion(&bnad->bnad_completions.ioc_comp);
2280 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2281 bna_ioceth_enable(&bnad->bna.ioceth);
2282 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2284 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2285 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2287 err = bnad->bnad_completions.ioc_comp_status;
2289 return err;
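/*
 * Illustrative note (not in the original driver): both helpers above follow
 * the same pattern - arm a completion under bna_lock, kick the ioceth state
 * machine, then wait up to BNAD_IOCETH_TIMEOUT milliseconds outside the
 * lock and report whatever status the completion callback recorded (for the
 * enable path the status starts out as BNA_CB_WAITING, so a timeout is
 * presumably reported back as a failure).
 */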
2292 /* Free BNA resources */
2293 static void
2294 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2295 u32 res_val_max)
2297 int i;
2299 for (i = 0; i < res_val_max; i++)
2300 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2303 /* Allocates memory and interrupt resources for BNA */
2304 static int
2305 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2306 u32 res_val_max)
2308 int i, err;
2310 for (i = 0; i < res_val_max; i++) {
2311 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2312 if (err)
2313 goto err_return;
2315 return 0;
2317 err_return:
2318 bnad_res_free(bnad, res_info, res_val_max);
2319 return err;
2322 /* Interrupt enable / disable */
2323 static void
2324 bnad_enable_msix(struct bnad *bnad)
2326 int i, ret;
2327 unsigned long flags;
2329 spin_lock_irqsave(&bnad->bna_lock, flags);
2330 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2331 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2332 return;
2334 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2336 if (bnad->msix_table)
2337 return;
2339 bnad->msix_table =
2340 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2342 if (!bnad->msix_table)
2343 goto intx_mode;
2345 for (i = 0; i < bnad->msix_num; i++)
2346 bnad->msix_table[i].entry = i;
2348 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2349 if (ret > 0) {
2350 /* Not enough MSI-X vectors. */
2351 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2352 ret, bnad->msix_num);
2354 spin_lock_irqsave(&bnad->bna_lock, flags);
2355 /* ret = #of vectors that we got */
2356 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2357 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2358 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2360 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2361 BNAD_MAILBOX_MSIX_VECTORS;
2363 if (bnad->msix_num > ret)
2364 goto intx_mode;
2366 /* Try once more with adjusted numbers */
2367 /* If this fails, fall back to INTx */
2368 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2369 bnad->msix_num);
2370 if (ret)
2371 goto intx_mode;
2373 } else if (ret < 0)
2374 goto intx_mode;
2376 pci_intx(bnad->pcidev, 0);
2378 return;
2380 intx_mode:
2381 pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
2383 kfree(bnad->msix_table);
2384 bnad->msix_table = NULL;
2385 bnad->msix_num = 0;
2386 spin_lock_irqsave(&bnad->bna_lock, flags);
2387 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2388 bnad_q_num_init(bnad);
2389 spin_unlock_irqrestore(&bnad->bna_lock, flags);
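/*
 * Illustrative summary (not part of the original driver): the MSI-X setup
 * above tries the full vector count first; if pci_enable_msix() reports
 * that only 'ret' vectors are available, the queue counts are adjusted and
 * a smaller request is retried once; any remaining failure (or setting the
 * bnad_msix_disable module parameter) leaves the driver in plain INTx mode
 * with a single Rx path.
 */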
2392 static void
2393 bnad_disable_msix(struct bnad *bnad)
2395 u32 cfg_flags;
2396 unsigned long flags;
2398 spin_lock_irqsave(&bnad->bna_lock, flags);
2399 cfg_flags = bnad->cfg_flags;
2400 if (bnad->cfg_flags & BNAD_CF_MSIX)
2401 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2402 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2404 if (cfg_flags & BNAD_CF_MSIX) {
2405 pci_disable_msix(bnad->pcidev);
2406 kfree(bnad->msix_table);
2407 bnad->msix_table = NULL;
2411 /* Netdev entry points */
2412 static int
2413 bnad_open(struct net_device *netdev)
2415 int err;
2416 struct bnad *bnad = netdev_priv(netdev);
2417 struct bna_pause_config pause_config;
2418 int mtu;
2419 unsigned long flags;
2421 mutex_lock(&bnad->conf_mutex);
2423 /* Tx */
2424 err = bnad_setup_tx(bnad, 0);
2425 if (err)
2426 goto err_return;
2428 /* Rx */
2429 err = bnad_setup_rx(bnad, 0);
2430 if (err)
2431 goto cleanup_tx;
2433 /* Port */
2434 pause_config.tx_pause = 0;
2435 pause_config.rx_pause = 0;
2437 mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2439 spin_lock_irqsave(&bnad->bna_lock, flags);
2440 bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2441 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2442 bna_enet_enable(&bnad->bna.enet);
2443 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2445 /* Enable broadcast */
2446 bnad_enable_default_bcast(bnad);
2448 /* Restore VLANs, if any */
2449 bnad_restore_vlans(bnad, 0);
2451 /* Set the UCAST address */
2452 spin_lock_irqsave(&bnad->bna_lock, flags);
2453 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2454 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2456 /* Start the stats timer */
2457 bnad_stats_timer_start(bnad);
2459 mutex_unlock(&bnad->conf_mutex);
2461 return 0;
2463 cleanup_tx:
2464 bnad_cleanup_tx(bnad, 0);
2466 err_return:
2467 mutex_unlock(&bnad->conf_mutex);
2468 return err;
2471 static int
2472 bnad_stop(struct net_device *netdev)
2474 struct bnad *bnad = netdev_priv(netdev);
2475 unsigned long flags;
2477 mutex_lock(&bnad->conf_mutex);
2479 /* Stop the stats timer */
2480 bnad_stats_timer_stop(bnad);
2482 init_completion(&bnad->bnad_completions.enet_comp);
2484 spin_lock_irqsave(&bnad->bna_lock, flags);
2485 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2486 bnad_cb_enet_disabled);
2487 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2489 wait_for_completion(&bnad->bnad_completions.enet_comp);
2491 bnad_cleanup_tx(bnad, 0);
2492 bnad_cleanup_rx(bnad, 0);
2494 /* Synchronize mailbox IRQ */
2495 bnad_mbox_irq_sync(bnad);
2497 mutex_unlock(&bnad->conf_mutex);
2499 return 0;
2502 /* TX */
2504 * bnad_start_xmit : Netdev entry point for Transmit
2505 * Called under lock held by net_device
2507 static netdev_tx_t
2508 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2510 struct bnad *bnad = netdev_priv(netdev);
2511 u32 txq_id = 0;
2512 struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
2514 u16 txq_prod, vlan_tag = 0;
2515 u32 unmap_prod, wis, wis_used, wi_range;
2516 u32 vectors, vect_id, i, acked;
2517 int err;
2518 unsigned int len;
2519 u32 gso_size;
2521 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
2522 dma_addr_t dma_addr;
2523 struct bna_txq_entry *txqent;
2524 u16 flags;
2526 if (unlikely(skb->len <= ETH_HLEN)) {
2527 dev_kfree_skb(skb);
2528 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2529 return NETDEV_TX_OK;
2531 if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
2532 dev_kfree_skb(skb);
2533 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
2534 return NETDEV_TX_OK;
2536 if (unlikely(skb_headlen(skb) == 0)) {
2537 dev_kfree_skb(skb);
2538 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2539 return NETDEV_TX_OK;
2543 * Takes care of the Tx that is scheduled between clearing the flag
2544 * and the netif_tx_stop_all_queues() call.
2546 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2547 dev_kfree_skb(skb);
2548 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2549 return NETDEV_TX_OK;
2552 vectors = 1 + skb_shinfo(skb)->nr_frags;
2553 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2554 dev_kfree_skb(skb);
2555 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2556 return NETDEV_TX_OK;
2558 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2559 acked = 0;
2560 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2561 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2562 if ((u16) (*tcb->hw_consumer_index) !=
2563 tcb->consumer_index &&
2564 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2565 acked = bnad_free_txbufs(bnad, tcb);
2566 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2567 bna_ib_ack(tcb->i_dbell, acked);
2568 smp_mb__before_clear_bit();
2569 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2570 } else {
2571 netif_stop_queue(netdev);
2572 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2575 smp_mb();
2577 * Check again to deal with the race between netif_stop_queue()
2578 * here and netif_wake_queue() in the interrupt handler, which
2579 * does not run under the netif tx lock.
2581 if (likely
2582 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2583 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2584 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2585 return NETDEV_TX_BUSY;
2586 } else {
2587 netif_wake_queue(netdev);
2588 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2592 unmap_prod = unmap_q->producer_index;
2593 flags = 0;
2595 txq_prod = tcb->producer_index;
2596 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2597 txqent->hdr.wi.reserved = 0;
2598 txqent->hdr.wi.num_vectors = vectors;
2600 if (vlan_tx_tag_present(skb)) {
2601 vlan_tag = (u16) vlan_tx_tag_get(skb);
2602 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2604 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2605 vlan_tag =
2606 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
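/*
 * Illustrative note: the 802.1Q TCI is rebuilt here with the CEE
 * priority in the 3-bit PCP field (bits 15..13) while the low 13 bits
 * (CFI + VID) of the original tag are kept; e.g. priority 5 with
 * VID 100 gives (5 << 13) | 100 == 0xa064.
 */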
2607 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2610 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2612 if (skb_is_gso(skb)) {
2613 gso_size = skb_shinfo(skb)->gso_size;
2615 if (unlikely(gso_size > netdev->mtu)) {
2616 dev_kfree_skb(skb);
2617 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2618 return NETDEV_TX_OK;
2620 if (unlikely((gso_size + skb_transport_offset(skb) +
2621 tcp_hdrlen(skb)) >= skb->len)) {
2622 txqent->hdr.wi.opcode =
2623 __constant_htons(BNA_TXQ_WI_SEND);
2624 txqent->hdr.wi.lso_mss = 0;
2625 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2626 } else {
2627 txqent->hdr.wi.opcode =
2628 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2629 txqent->hdr.wi.lso_mss = htons(gso_size);
2632 err = bnad_tso_prepare(bnad, skb);
2633 if (unlikely(err)) {
2634 dev_kfree_skb(skb);
2635 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2636 return NETDEV_TX_OK;
2638 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2639 txqent->hdr.wi.l4_hdr_size_n_offset =
2640 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2641 (tcp_hdrlen(skb) >> 2,
2642 skb_transport_offset(skb)));
2643 } else {
2644 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
2645 txqent->hdr.wi.lso_mss = 0;
2647 if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
2648 dev_kfree_skb(skb);
2649 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2650 return NETDEV_TX_OK;
2653 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2654 u8 proto = 0;
2656 if (skb->protocol == __constant_htons(ETH_P_IP))
2657 proto = ip_hdr(skb)->protocol;
2658 else if (skb->protocol ==
2659 __constant_htons(ETH_P_IPV6)) {
2660 /* nexthdr may not be TCP immediately. */
2661 proto = ipv6_hdr(skb)->nexthdr;
2663 if (proto == IPPROTO_TCP) {
2664 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2665 txqent->hdr.wi.l4_hdr_size_n_offset =
2666 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2667 (0, skb_transport_offset(skb)));
2669 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2671 if (unlikely(skb_headlen(skb) <
2672 skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2673 dev_kfree_skb(skb);
2674 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2675 return NETDEV_TX_OK;
2678 } else if (proto == IPPROTO_UDP) {
2679 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2680 txqent->hdr.wi.l4_hdr_size_n_offset =
2681 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2682 (0, skb_transport_offset(skb)));
2684 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2685 if (unlikely(skb_headlen(skb) <
2686 skb_transport_offset(skb) +
2687 sizeof(struct udphdr))) {
2688 dev_kfree_skb(skb);
2689 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2690 return NETDEV_TX_OK;
2692 } else {
2693 dev_kfree_skb(skb);
2694 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2695 return NETDEV_TX_OK;
2697 } else {
2698 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2702 txqent->hdr.wi.flags = htons(flags);
2704 txqent->hdr.wi.frame_length = htonl(skb->len);
2706 unmap_q->unmap_array[unmap_prod].skb = skb;
2707 len = skb_headlen(skb);
2708 txqent->vector[0].length = htons(len);
2709 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2710 skb_headlen(skb), DMA_TO_DEVICE);
2711 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2712 dma_addr);
2714 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
2715 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2717 vect_id = 0;
2718 wis_used = 1;
2720 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2721 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2722 u16 size = frag->size;
2724 if (unlikely(size == 0)) {
2725 unmap_prod = unmap_q->producer_index;
2727 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2728 unmap_q->unmap_array,
2729 unmap_prod, unmap_q->q_depth, skb,
2731 dev_kfree_skb(skb);
2732 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2733 return NETDEV_TX_OK;
2736 len += size;
2738 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2739 vect_id = 0;
2740 if (--wi_range)
2741 txqent++;
2742 else {
2743 BNA_QE_INDX_ADD(txq_prod, wis_used,
2744 tcb->q_depth);
2745 wis_used = 0;
2746 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2747 txqent, wi_range);
2749 wis_used++;
2750 txqent->hdr.wi_ext.opcode =
2751 __constant_htons(BNA_TXQ_WI_EXTENSION);
2754 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2755 txqent->vector[vect_id].length = htons(size);
2756 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
2757 0, size, DMA_TO_DEVICE);
2758 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2759 dma_addr);
2760 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2761 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2764 if (unlikely(len != skb->len)) {
2765 unmap_prod = unmap_q->producer_index;
2767 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2768 unmap_q->unmap_array, unmap_prod,
2769 unmap_q->q_depth, skb,
2770 skb_shinfo(skb)->nr_frags);
2771 dev_kfree_skb(skb);
2772 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2773 return NETDEV_TX_OK;
2776 unmap_q->producer_index = unmap_prod;
2777 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2778 tcb->producer_index = txq_prod;
2780 smp_mb();
2782 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2783 return NETDEV_TX_OK;
2785 bna_txq_prod_indx_doorbell(tcb);
2786 smp_mb();
2788 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2789 tasklet_schedule(&bnad->tx_free_tasklet);
2791 return NETDEV_TX_OK;
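/*
 * Illustrative helper (not part of the original driver): the work-item
 * accounting above assumes each Tx work item, including extension work
 * items, carries up to four vectors, as the "4 vectors per work item"
 * note suggests; BNA_TXQ_WI_NEEDED() is expected to compute the
 * equivalent of the sketch below (e.g. 9 vectors -> 3 work items).
 */
static inline u32 example_txq_wi_needed(u32 vectors)
{
	return DIV_ROUND_UP(vectors, 4);
}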
2795 * A spin_lock is used to synchronize reading of the stats structures,
2796 * which are written by BNA under the same lock.
2798 static struct rtnl_link_stats64 *
2799 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2801 struct bnad *bnad = netdev_priv(netdev);
2802 unsigned long flags;
2804 spin_lock_irqsave(&bnad->bna_lock, flags);
2806 bnad_netdev_qstats_fill(bnad, stats);
2807 bnad_netdev_hwstats_fill(bnad, stats);
2809 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2811 return stats;
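/*
 * Illustrative note (not in the original driver): this handler is invoked
 * by the networking core through dev_get_stats(), so the counters filled
 * in above are what interface statistics tools ultimately display.
 */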
2814 void
2815 bnad_set_rx_mode(struct net_device *netdev)
2817 struct bnad *bnad = netdev_priv(netdev);
2818 u32 new_mask, valid_mask;
2819 unsigned long flags;
2821 spin_lock_irqsave(&bnad->bna_lock, flags);
2823 new_mask = valid_mask = 0;
2825 if (netdev->flags & IFF_PROMISC) {
2826 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2827 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2828 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2829 bnad->cfg_flags |= BNAD_CF_PROMISC;
2831 } else {
2832 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2833 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2834 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2835 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2839 if (netdev->flags & IFF_ALLMULTI) {
2840 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2841 new_mask |= BNA_RXMODE_ALLMULTI;
2842 valid_mask |= BNA_RXMODE_ALLMULTI;
2843 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2845 } else {
2846 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2847 new_mask &= ~BNA_RXMODE_ALLMULTI;
2848 valid_mask |= BNA_RXMODE_ALLMULTI;
2849 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2853 if (bnad->rx_info[0].rx == NULL)
2854 goto unlock;
2856 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2858 if (!netdev_mc_empty(netdev)) {
2859 u8 *mcaddr_list;
2860 int mc_count = netdev_mc_count(netdev);
2862 /* Index 0 holds the broadcast address */
2863 mcaddr_list =
2864 kzalloc((mc_count + 1) * ETH_ALEN,
2865 GFP_ATOMIC);
2866 if (!mcaddr_list)
2867 goto unlock;
2869 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2871 /* Copy rest of the MC addresses */
2872 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2874 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2875 mcaddr_list, NULL);
2877 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2878 kfree(mcaddr_list);
2880 unlock:
2881 spin_unlock_irqrestore(&bnad->bna_lock, flags);
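/*
 * Illustrative note (not in the original driver): the list handed to
 * bna_rx_mcast_listset() above is (mc_count + 1) * ETH_ALEN bytes, laid
 * out as [ broadcast ][ mc addr 1 ] ... [ mc addr mc_count ], with the
 * broadcast entry copied in first and the rest filled by
 * bnad_netdev_mc_list_get().
 */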
2885 * bna_lock is used to synchronize writes to netdev->dev_addr;
2886 * conf_lock cannot be used since this call may be made
2887 * in a non-blocking context.
2889 static int
2890 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2892 int err;
2893 struct bnad *bnad = netdev_priv(netdev);
2894 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2895 unsigned long flags;
2897 spin_lock_irqsave(&bnad->bna_lock, flags);
2899 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2901 if (!err)
2902 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2904 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2906 return err;
2909 static int
2910 bnad_mtu_set(struct bnad *bnad, int mtu)
2912 unsigned long flags;
2914 init_completion(&bnad->bnad_completions.mtu_comp);
2916 spin_lock_irqsave(&bnad->bna_lock, flags);
2917 bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
2918 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2920 wait_for_completion(&bnad->bnad_completions.mtu_comp);
2922 return bnad->bnad_completions.mtu_comp_status;
2925 static int
2926 bnad_change_mtu(struct net_device *netdev, int new_mtu)
2928 int err, mtu = netdev->mtu;
2929 struct bnad *bnad = netdev_priv(netdev);
2931 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2932 return -EINVAL;
2934 mutex_lock(&bnad->conf_mutex);
2936 netdev->mtu = new_mtu;
2938 mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
2939 err = bnad_mtu_set(bnad, mtu);
2940 if (err)
2941 err = -EBUSY;
2943 mutex_unlock(&bnad->conf_mutex);
2944 return err;
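/*
 * Illustrative helper (not part of the original driver): the frame size
 * handed to bna_enet_mtu_set() in bnad_mtu_set()/bnad_open() is the L3 MTU
 * plus the Ethernet header, one VLAN tag and the FCS;
 * e.g. example_frame_size(1500) == 14 + 4 + 1500 + 4 == 1522.
 */
static inline int example_frame_size(int new_mtu)
{
	return ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
}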
2947 static void
2948 bnad_vlan_rx_add_vid(struct net_device *netdev,
2949 unsigned short vid)
2951 struct bnad *bnad = netdev_priv(netdev);
2952 unsigned long flags;
2954 if (!bnad->rx_info[0].rx)
2955 return;
2957 mutex_lock(&bnad->conf_mutex);
2959 spin_lock_irqsave(&bnad->bna_lock, flags);
2960 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2961 set_bit(vid, bnad->active_vlans);
2962 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2964 mutex_unlock(&bnad->conf_mutex);
2967 static void
2968 bnad_vlan_rx_kill_vid(struct net_device *netdev,
2969 unsigned short vid)
2971 struct bnad *bnad = netdev_priv(netdev);
2972 unsigned long flags;
2974 if (!bnad->rx_info[0].rx)
2975 return;
2977 mutex_lock(&bnad->conf_mutex);
2979 spin_lock_irqsave(&bnad->bna_lock, flags);
2980 clear_bit(vid, bnad->active_vlans);
2981 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2982 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2984 mutex_unlock(&bnad->conf_mutex);
2987 #ifdef CONFIG_NET_POLL_CONTROLLER
2988 static void
2989 bnad_netpoll(struct net_device *netdev)
2991 struct bnad *bnad = netdev_priv(netdev);
2992 struct bnad_rx_info *rx_info;
2993 struct bnad_rx_ctrl *rx_ctrl;
2994 u32 curr_mask;
2995 int i, j;
2997 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2998 bna_intx_disable(&bnad->bna, curr_mask);
2999 bnad_isr(bnad->pcidev->irq, netdev);
3000 bna_intx_enable(&bnad->bna, curr_mask);
3001 } else {
3003 * Tx processing may happen in sending context, so no need
3004 * to explicitly process completions here
3007 /* Rx processing */
3008 for (i = 0; i < bnad->num_rx; i++) {
3009 rx_info = &bnad->rx_info[i];
3010 if (!rx_info->rx)
3011 continue;
3012 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3013 rx_ctrl = &rx_info->rx_ctrl[j];
3014 if (rx_ctrl->ccb)
3015 bnad_netif_rx_schedule_poll(bnad,
3016 rx_ctrl->ccb);
3021 #endif
3023 static const struct net_device_ops bnad_netdev_ops = {
3024 .ndo_open = bnad_open,
3025 .ndo_stop = bnad_stop,
3026 .ndo_start_xmit = bnad_start_xmit,
3027 .ndo_get_stats64 = bnad_get_stats64,
3028 .ndo_set_rx_mode = bnad_set_rx_mode,
3029 .ndo_validate_addr = eth_validate_addr,
3030 .ndo_set_mac_address = bnad_set_mac_address,
3031 .ndo_change_mtu = bnad_change_mtu,
3032 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3033 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3034 #ifdef CONFIG_NET_POLL_CONTROLLER
3035 .ndo_poll_controller = bnad_netpoll
3036 #endif
3039 static void
3040 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3042 struct net_device *netdev = bnad->netdev;
3044 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3045 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3046 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
3048 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3049 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3050 NETIF_F_TSO | NETIF_F_TSO6;
3052 netdev->features |= netdev->hw_features |
3053 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3055 if (using_dac)
3056 netdev->features |= NETIF_F_HIGHDMA;
3058 netdev->mem_start = bnad->mmio_start;
3059 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3061 netdev->netdev_ops = &bnad_netdev_ops;
3062 bnad_set_ethtool_ops(netdev);
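/*
 * Illustrative note (not in the original driver): netdev->hw_features is
 * the set of offloads the user may toggle (e.g. via ethtool -K), while
 * netdev->features is the set currently enabled; vlan_features is applied
 * to VLAN devices stacked on top of this interface.
 */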
3066 * 1. Initialize the bnad structure
3067 * 2. Setup netdev pointer in pci_dev
3068 * 3. Initialize Tx free tasklet
3069 * 4. Initialize no. of TxQ & CQs & MSIX vectors
3071 static int
3072 bnad_init(struct bnad *bnad,
3073 struct pci_dev *pdev, struct net_device *netdev)
3075 unsigned long flags;
3077 SET_NETDEV_DEV(netdev, &pdev->dev);
3078 pci_set_drvdata(pdev, netdev);
3080 bnad->netdev = netdev;
3081 bnad->pcidev = pdev;
3082 bnad->mmio_start = pci_resource_start(pdev, 0);
3083 bnad->mmio_len = pci_resource_len(pdev, 0);
3084 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3085 if (!bnad->bar0) {
3086 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3087 pci_set_drvdata(pdev, NULL);
3088 return -ENOMEM;
3090 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3091 (unsigned long long) bnad->mmio_len);
3093 spin_lock_irqsave(&bnad->bna_lock, flags);
3094 if (!bnad_msix_disable)
3095 bnad->cfg_flags = BNAD_CF_MSIX;
3097 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3099 bnad_q_num_init(bnad);
3100 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3102 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3103 (bnad->num_rx * bnad->num_rxp_per_rx) +
3104 BNAD_MAILBOX_MSIX_VECTORS;
3106 bnad->txq_depth = BNAD_TXQ_DEPTH;
3107 bnad->rxq_depth = BNAD_RXQ_DEPTH;
3109 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3110 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3112 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
3113 (unsigned long)bnad);
3115 return 0;
3119 * Must be called after bnad_pci_uninit()
3120 * so that iounmap() and pci_set_drvdata(NULL)
3121 * happen only after PCI uninitialization.
3123 static void
3124 bnad_uninit(struct bnad *bnad)
3126 if (bnad->bar0)
3127 iounmap(bnad->bar0);
3128 pci_set_drvdata(bnad->pcidev, NULL);
3132 * Initialize locks
3133 a) Per-ioceth mutex used for serializing configuration
3134 changes from OS interface
3135 b) spin lock used to protect bna state machine
3137 static void
3138 bnad_lock_init(struct bnad *bnad)
3140 spin_lock_init(&bnad->bna_lock);
3141 mutex_init(&bnad->conf_mutex);
3144 static void
3145 bnad_lock_uninit(struct bnad *bnad)
3147 mutex_destroy(&bnad->conf_mutex);
3150 /* PCI Initialization */
3151 static int
3152 bnad_pci_init(struct bnad *bnad,
3153 struct pci_dev *pdev, bool *using_dac)
3155 int err;
3157 err = pci_enable_device(pdev);
3158 if (err)
3159 return err;
3160 err = pci_request_regions(pdev, BNAD_NAME);
3161 if (err)
3162 goto disable_device;
3163 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3164 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3165 *using_dac = 1;
3166 } else {
3167 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3168 if (err) {
3169 err = dma_set_coherent_mask(&pdev->dev,
3170 DMA_BIT_MASK(32));
3171 if (err)
3172 goto release_regions;
3174 *using_dac = 0;
3176 pci_set_master(pdev);
3177 return 0;
3179 release_regions:
3180 pci_release_regions(pdev);
3181 disable_device:
3182 pci_disable_device(pdev);
3184 return err;
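/*
 * Illustrative note (not in the original driver): the DMA setup above
 * prefers 64-bit streaming and coherent masks (reported back through
 * *using_dac so that NETIF_F_HIGHDMA is advertised later) and only falls
 * back to 32-bit masks if the 64-bit configuration is rejected.
 */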
3187 static void
3188 bnad_pci_uninit(struct pci_dev *pdev)
3190 pci_release_regions(pdev);
3191 pci_disable_device(pdev);
3194 static int __devinit
3195 bnad_pci_probe(struct pci_dev *pdev,
3196 const struct pci_device_id *pcidev_id)
3198 bool using_dac;
3199 int err;
3200 struct bnad *bnad;
3201 struct bna *bna;
3202 struct net_device *netdev;
3203 struct bfa_pcidev pcidev_info;
3204 unsigned long flags;
3206 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3207 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3209 mutex_lock(&bnad_fwimg_mutex);
3210 if (!cna_get_firmware_buf(pdev)) {
3211 mutex_unlock(&bnad_fwimg_mutex);
3212 pr_warn("Failed to load Firmware Image!\n");
3213 return -ENODEV;
3215 mutex_unlock(&bnad_fwimg_mutex);
3218 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3219 * bnad = netdev_priv(netdev)
3221 netdev = alloc_etherdev(sizeof(struct bnad));
3222 if (!netdev) {
3223 dev_err(&pdev->dev, "netdev allocation failed\n");
3224 err = -ENOMEM;
3225 return err;
3227 bnad = netdev_priv(netdev);
3229 bnad_lock_init(bnad);
3231 mutex_lock(&bnad->conf_mutex);
3233 * PCI initialization
3234 * Output : using_dac = 1 for 64 bit DMA
3235 * = 0 for 32 bit DMA
3237 err = bnad_pci_init(bnad, pdev, &using_dac);
3238 if (err)
3239 goto unlock_mutex;
3242 * Initialize bnad structure
3243 * Setup relation between pci_dev & netdev
3244 * Init Tx free tasklet
3246 err = bnad_init(bnad, pdev, netdev);
3247 if (err)
3248 goto pci_uninit;
3250 /* Initialize netdev structure, set up ethtool ops */
3251 bnad_netdev_init(bnad, using_dac);
3253 /* Set link to down state */
3254 netif_carrier_off(netdev);
3256 /* Get resource requirement from bna */
3257 spin_lock_irqsave(&bnad->bna_lock, flags);
3258 bna_res_req(&bnad->res_info[0]);
3259 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3261 /* Allocate resources from bna */
3262 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3263 if (err)
3264 goto drv_uninit;
3266 bna = &bnad->bna;
3268 /* Setup pcidev_info for bna_init() */
3269 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3270 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3271 pcidev_info.device_id = bnad->pcidev->device;
3272 pcidev_info.pci_bar_kva = bnad->bar0;
3274 spin_lock_irqsave(&bnad->bna_lock, flags);
3275 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3276 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3278 bnad->stats.bna_stats = &bna->stats;
3280 bnad_enable_msix(bnad);
3281 err = bnad_mbox_irq_alloc(bnad);
3282 if (err)
3283 goto res_free;
3286 /* Set up timers */
3287 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3288 ((unsigned long)bnad));
3289 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3290 ((unsigned long)bnad));
3291 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3292 ((unsigned long)bnad));
3293 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3294 ((unsigned long)bnad));
3296 /* Now start the timer before calling IOC */
3297 mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3298 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3301 * Start the chip
3302 * If the callback comes back with an error, we bail out.
3303 * This is a catastrophic error.
3305 err = bnad_ioceth_enable(bnad);
3306 if (err) {
3307 pr_err("BNA: Initialization failed err=%d\n",
3308 err);
3309 goto probe_success;
3312 spin_lock_irqsave(&bnad->bna_lock, flags);
3313 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3314 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3315 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3316 bna_attr(bna)->num_rxp - 1);
3317 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3318 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3319 err = -EIO;
3321 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3322 if (err)
3323 goto disable_ioceth;
3325 spin_lock_irqsave(&bnad->bna_lock, flags);
3326 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3327 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3329 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3330 if (err) {
3331 err = -EIO;
3332 goto disable_ioceth;
3335 spin_lock_irqsave(&bnad->bna_lock, flags);
3336 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3337 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3339 /* Get the burnt-in mac */
3340 spin_lock_irqsave(&bnad->bna_lock, flags);
3341 bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3342 bnad_set_netdev_perm_addr(bnad);
3343 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3345 mutex_unlock(&bnad->conf_mutex);
3347 /* Finally, register with the net_device layer */
3348 err = register_netdev(netdev);
3349 if (err) {
3350 pr_err("BNA : Registering with netdev failed\n");
3351 goto probe_uninit;
3353 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3355 return 0;
3357 probe_success:
3358 mutex_unlock(&bnad->conf_mutex);
3359 return 0;
3361 probe_uninit:
3362 mutex_lock(&bnad->conf_mutex);
3363 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3364 disable_ioceth:
3365 bnad_ioceth_disable(bnad);
3366 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3367 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3368 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3369 spin_lock_irqsave(&bnad->bna_lock, flags);
3370 bna_uninit(bna);
3371 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3372 bnad_mbox_irq_free(bnad);
3373 bnad_disable_msix(bnad);
3374 res_free:
3375 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3376 drv_uninit:
3377 bnad_uninit(bnad);
3378 pci_uninit:
3379 bnad_pci_uninit(pdev);
3380 unlock_mutex:
3381 mutex_unlock(&bnad->conf_mutex);
3382 bnad_lock_uninit(bnad);
3383 free_netdev(netdev);
3384 return err;
3387 static void __devexit
3388 bnad_pci_remove(struct pci_dev *pdev)
3390 struct net_device *netdev = pci_get_drvdata(pdev);
3391 struct bnad *bnad;
3392 struct bna *bna;
3393 unsigned long flags;
3395 if (!netdev)
3396 return;
3398 pr_info("%s bnad_pci_remove\n", netdev->name);
3399 bnad = netdev_priv(netdev);
3400 bna = &bnad->bna;
3402 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3403 unregister_netdev(netdev);
3405 mutex_lock(&bnad->conf_mutex);
3406 bnad_ioceth_disable(bnad);
3407 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3408 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3409 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3410 spin_lock_irqsave(&bnad->bna_lock, flags);
3411 bna_uninit(bna);
3412 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3414 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3415 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3416 bnad_mbox_irq_free(bnad);
3417 bnad_disable_msix(bnad);
3418 bnad_pci_uninit(pdev);
3419 mutex_unlock(&bnad->conf_mutex);
3420 bnad_lock_uninit(bnad);
3421 bnad_uninit(bnad);
3422 free_netdev(netdev);
3425 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3427 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3428 PCI_DEVICE_ID_BROCADE_CT),
3429 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3430 .class_mask = 0xffff00
3433 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3434 BFA_PCI_DEVICE_ID_CT2),
3435 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3436 .class_mask = 0xffff00
3438 {0, },
3441 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3443 static struct pci_driver bnad_pci_driver = {
3444 .name = BNAD_NAME,
3445 .id_table = bnad_pci_id_table,
3446 .probe = bnad_pci_probe,
3447 .remove = __devexit_p(bnad_pci_remove),
3450 static int __init
3451 bnad_module_init(void)
3453 int err;
3455 pr_info("Brocade 10G Ethernet driver - version: %s\n",
3456 BNAD_VERSION);
3458 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3460 err = pci_register_driver(&bnad_pci_driver);
3461 if (err < 0) {
3462 pr_err("bna : PCI registration failed in module init "
3463 "(%d)\n", err);
3464 return err;
3467 return 0;
3470 static void __exit
3471 bnad_module_exit(void)
3473 pci_unregister_driver(&bnad_pci_driver);
3475 if (bfi_fw)
3476 release_firmware(bfi_fw);
3479 module_init(bnad_module_init);
3480 module_exit(bnad_module_exit);
3482 MODULE_AUTHOR("Brocade");
3483 MODULE_LICENSE("GPL");
3484 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3485 MODULE_VERSION(BNAD_VERSION);
3486 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3487 MODULE_FIRMWARE(CNA_FW_FILE_CT2);
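/*
 * Illustrative usage note (not part of the original driver, and assuming
 * the driver is built as the "bna" module): the read-only module
 * parameters used above, bnad_msix_disable and bnad_ioc_auto_recover,
 * would then be visible under /sys/module/bna/parameters/, and the
 * MODULE_FIRMWARE() entries name the firmware images fetched at probe
 * time via cna_get_firmware_buf().
 */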