/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"

static int bnx2x_setup_irqs(struct bnx2x *bp);
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",

	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");

	new_cons = nbd + tx_buf->first_bd;

	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
	tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
	dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
		       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);

	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
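
/* TX completion: walk from the driver's TX consumer to the hw consumer read
 * from the status block, free the BDs of every completed packet and, if the
 * queue was stopped and enough descriptors were reclaimed, wake it again
 * under the TX lock. */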
int bnx2x_tx_int(struct bnx2x_fastpath *fp)
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
		   fp->index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 */
		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
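
/* Remember the highest SGE index reported by the FW so that the SGE
 * producer can later be advanced over the consumed pages. */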
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
	u16 last_max, last_elem, first_elem;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp,
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;

	fp->rx_sge_prod += delta;
	/* clear page-end entries */
	bnx2x_clear_sge_mask_next_elems(fp);

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
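
/* Begin a TPA (LRO) aggregation: park the partially filled skb in the
 * per-queue TPA pool and map a fresh skb into the rx BD that the FW will
 * fill next. */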
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct eth_fast_path_rx_cqe *fp_cqe,
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
			fp->eth_q_stats.rx_skb_alloc_failed++;

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
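
/* End a TPA aggregation: unmap the pooled skb, attach the SGE pages as
 * frags, fix the IP checksum and hand the aggregated skb to the stack; on
 * allocation failure the packet is dropped and the buffer stays in the bin. */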
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(((char *)(skb)) + L1_CACHE_BYTES);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);

		skb_reserve(skb, pad);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		iph = (struct iphdr *)skb->data;
		iph->check = ip_fast_csum((u8 *)iph, iph->ihl);

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
			if ((le16_to_cpu(cqe->fast_path_cqe.
			    pars_flags.flags) & PARSING_FLAGS_VLAN))
				__vlan_hwaccel_put_tag(skb,
						 le16_to_cpu(cqe->fast_path_cqe.
			napi_gro_receive(&fp->napi, skb);

			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->fast_path_cqe.status_flags &
	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
			le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
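
/* RX fast path: process up to @budget completions from the RCQ - slowpath
 * events, TPA start/stop and regular packets - then publish the new BD and
 * CQE producers to the chip. */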
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		union eth_rx_cqe *cqe;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		 */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);

		/* this is an rx packet */
			rx_buf = &fp->rx_buf_ring[bd_cons];

			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* - If CQE is marked both TPA_START and TPA_END it is
			 * - FP CQE will always have either TPA_START or/and
			 *   TPA_STOP flags set.
			 */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			    (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",

					bnx2x_tpa_start(fp, queue, skb,

					/* Set Toeplitz hash for an LRO skb */
					bnx2x_set_skb_rxhash(bp, cqe, skb);

				} else { /* TPA_STOP */
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "

					/* This is a size of the linear data */
					len = le16_to_cpu(cqe->fast_path_cqe.
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
			prefetch(((char *)(skb)) + L1_CACHE_BYTES);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,

				if (new_skb == NULL) {
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;

				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);

			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
				skb_reserve(skb, pad);

					   "ERROR  packet dropped because "
					   "of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;

					bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Set Toeplitz hash for a non-LRO skb */
			bnx2x_set_skb_rxhash(bp, cqe, skb);

			skb_checksum_none_assert(skb);

				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
					fp->eth_q_stats.hw_csum_err++;

		skb_record_rx_queue(skb, fp->index);

		if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			__vlan_hwaccel_put_tag(skb,
				 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		napi_gro_receive(&fp->napi, skb);

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);

		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,

	fp->rx_pkt += rx_pkt;
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
			 "[fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

void bnx2x_release_phy_lock(struct bnx2x *bp)
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
void bnx2x_link_report(struct bnx2x *bp)
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");

	if (bp->link_vars.link_up) {

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");

		line_speed = bp->link_vars.line_speed;

				((bp->mf_config[BP_VN(bp)] &
				  FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;

		pr_cont("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			pr_cont("full duplex");
			pr_cont("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->link_vars.flow_ctrl &
					pr_cont("& transmit ");
				pr_cont(", transmit ");
			pr_cont("flow control ON");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
/* Returns the number of actually allocated BDs */
static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
			BNX2X_ERR("was only able to allocate "
				  "%d rx skbs on queue[%d]\n", i, fp->index);
			fp->eth_q_stats.rx_skb_alloc_failed++;

		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= i);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
	fp->rx_pkt = fp->rx_calls = 0;
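
/* Fill one RX BD ring; the ring size defaults to MAX_RX_AVAIL shared among
 * the queues unless rx_ring_size was set explicitly. */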
static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
	struct bnx2x *bp = fp->bp;
	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
			   MAX_RX_AVAIL/bp->num_queues;

	rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);

	bnx2x_alloc_rx_bds(fp, rx_ring_size);

	/* this will generate an interrupt (to the TSTORM)
	 * must only be done after chip is initialized
	 */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
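
/* Initialize all RX rings: pre-allocate the TPA pool (unless TPA is
 * disabled), the SGE pages and the RX BDs, and program the E1x FW
 * workaround address with the RCQ mapping. */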
void bnx2x_init_rx_rings(struct bnx2x *bp)
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
		IP_HEADER_ALIGNMENT_PADDING;

	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		if (!fp->disable_tpa) {
			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
					bnx2x_free_tpa_pool(bp, fp, i);

				dma_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
				fp->tpa_state[i] = BNX2X_TPA_STOP;

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate "
					BNX2X_ERR("disabling TPA for"
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp,
					bnx2x_free_tpa_pool(bp,

				ring_prod = NEXT_SGE_IDX(ring_prod);

			fp->rx_sge_prod = ring_prod;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_set_next_page_rx_bd(fp);

		bnx2x_set_next_page_rx_cq(fp);

		/* Allocate BDs and initialize BD ring */
		bnx2x_alloc_rx_bd_ring(fp);

		if (!CHIP_IS_E2(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
void bnx2x_free_skbs(struct bnx2x *bp)
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
void bnx2x_free_irq(struct bnx2x *bp)
	if (bp->flags & USING_MSIX_FLAG)
		bnx2x_free_msix_irqs(bp);
	else if (bp->flags & USING_MSI_FLAG)
		free_irq(bp->pdev->irq, bp->dev);
		free_irq(bp->pdev->irq, bp->dev);
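
/* Build the MSI-X table (slowpath + CNIC + one vector per queue) and enable
 * MSI-X, shrinking the number of queues if fewer vectors are available;
 * otherwise fall back to INTx/MSI. */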
int bnx2x_enable_msix(struct bnx2x *bp)
	int msix_vec = 0, i, rc, req_cnt;

	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
	   bp->msix_table[0].entry);

	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
	   bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);

	for_each_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", msix_vec, msix_vec, i);

	req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* how many fewer vectors will we have? */
		int diff = req_cnt - rc;

		   "Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

			   "MSI-X is not attainable  rc %d\n", rc);

		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_queues -= diff;

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
		/* fall to INTx if not enough memory */
		bp->flags |= DISABLE_MSI_FLAG;
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);

	bp->flags |= USING_MSIX_FLAG;
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
		BNX2X_ERR("request sp irq failed\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);

		fp->state = BNX2X_FP_STATE_IRQ;

	i = BNX2X_NUM_QUEUES(bp);
	offset = 1 + CNIC_CONTEXT_USE;
	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);
int bnx2x_enable_msi(struct bnx2x *bp)
	rc = pci_enable_msi(bp->pdev);
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");

	bp->flags |= USING_MSI_FLAG;
static int bnx2x_req_irq(struct bnx2x *bp)
	unsigned long flags;

	if (bp->flags & USING_MSI_FLAG)
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
static void bnx2x_napi_enable(struct bnx2x *bp)
	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));

static void bnx2x_napi_disable(struct bnx2x *bp)
	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
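
/* Start/stop the netif level: interrupts, NAPI and the TX queues. */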
void bnx2x_netif_start(struct bnx2x *bp)
	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
void bnx2x_set_num_queues(struct bnx2x *bp)
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
	case ETH_RSS_MODE_REGULAR:
		bp->num_queues = bnx2x_calc_num_queues(bp);
static void bnx2x_release_firmware(struct bnx2x *bp)
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
	/* Set init arrays */
	rc = bnx2x_init_firmware(bp);
		BNX2X_ERR("Error loading firmware\n");

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* must be called before memory allocation and HW init */
	bnx2x_ilt_set_info(bp);

	if (bnx2x_alloc_mem(bp))

	netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
	rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
		BNX2X_ERR("Unable to update real_num_rx_queues\n");

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	bnx2x_napi_enable(bp);

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
			BNX2X_ERR("MCP response failure, aborting\n");

		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */

		int path = BP_PATH(bp);
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		load_count[path][0]++;
		load_count[path][1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		if (load_count[path][0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[path][1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	rc = bnx2x_init_hw(bp, load_code);
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
			BNX2X_ERR("MCP response failure, aborting\n");

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_func_start(bp);
		BNX2X_ERR("Function start failed!\n");
#ifndef BNX2X_STOP_ON_ERROR

	rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR

	if (!CHIP_IS_E1(bp) &&
	    (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
		bp->flags |= MF_FUNC_DIS;

	/* Enable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
	for_each_nondefault_queue(bp, i) {
		rc = bnx2x_setup_client(bp, &bp->fp[i], 0);

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	bnx2x_set_eth_mac(bp, 1);

	bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);

		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_clear_bit();
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);

		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;

		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
	bnx2x_inc_load_cnt(bp);

	bnx2x_release_firmware(bp);

	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);

	bnx2x_int_disable_sync(bp, 1);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);

	bnx2x_napi_disable(bp);

	bnx2x_release_firmware(bp);
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
	if (bp->state == BNX2X_STATE_CLOSED) {
		/* Interface has been removed - nothing to recover */
		bp->recovery_state = BNX2X_RECOVERY_DONE;

		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);

	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_tx_disable(bp);

	del_timer_sync(&bp->timer);

	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));

	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bp->state = BNX2X_STATE_CLOSED;

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);

	/* Reset MCP mail box sequence if there is ongoing recovery */
	if (unload_mode == UNLOAD_RECOVERY)
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
	/* If there is no power capability, silently succeed */
		DP(NETIF_MSG_HW, "No power capability. Breaking.\n");

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */

		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;

			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
/*
 * net_device service functions
 */
int bnx2x_poll(struct napi_struct *napi, int budget)
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
	struct bnx2x *bp = fp->bp;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);

		if (bnx2x_has_tx_work(fp))

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				   "Update index to %d\n", fp->fp_hc_idx);
				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_hc_idx),
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
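
/* Classify an outgoing skb into the XMIT_CSUM and XMIT_GSO flags that drive
 * how the parsing BD is built in bnx2x_start_xmit(). */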
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
	if (skb->ip_summed != CHECKSUM_PARTIAL)

	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			rc |= XMIT_CSUM_TCP;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			rc |= XMIT_CSUM_TCP;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum  = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {

				wnd_sum -= first_bd_sz;

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
					skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					skb_shinfo(skb)->frags[wnd_idx].size;

			/* in non-LSO too fragmented packet should always
			   be linearized */

	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
					struct eth_tx_parse_bd_e2 *pbd,
	pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
		ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
	if ((xmit_type & XMIT_GSO_V6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
/*
 * Update PBD in GSO case.
 *
 * @param tx_start_bd
 */
static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
				     struct eth_tx_parse_bd_e1x *pbd,
	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
	pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
	pbd->tcp_flags = pbd_tcp_flags(skb);

	if (xmit_type & XMIT_GSO_V4) {
		pbd->ip_id = swab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						  0, IPPROTO_TCP, 0));

		pbd->tcp_pseudo_csum =
			swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						&ipv6_hdr(skb)->daddr,
						0, IPPROTO_TCP, 0));

	pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
/*
 * @param tx_start_bd
 *
 * @return header len
 */
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
				       struct eth_tx_parse_bd_e2 *pbd,
	pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
		ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;

	pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;

	return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
/*
 * @param tx_start_bd
 *
 * @return Header length
 */
static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
				    struct eth_tx_parse_bd_e1x *pbd,
	u8 hlen = (skb_network_header(skb) - skb->data) / 2;

	/* for now NS flag is not used in Linux */
		(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

	pbd->ip_hlen_w = (skb_transport_header(skb) -
			  skb_network_header(skb)) / 2;

	hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;

	pbd->total_hlen_w = cpu_to_le16(hlen);

	if (xmit_type & XMIT_CSUM_TCP) {
		pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		s8 fix = SKB_CS_OFF(skb); /* signed! */

		DP(NETIF_MSG_TX_QUEUED,
		   "hlen %d  fix %d  csum before fix %x\n",
		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));

		/* HW bug: fixup the CSUM */
		pbd->tcp_pseudo_csum =
			bnx2x_csum_fix(skb_transport_header(skb),

		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
		   pbd->tcp_pseudo_csum);
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
	u16 pkt_prod, bd_prod;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	__le16 pkt_size = 0;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;

	DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
	   "protocol(%x,%x) gso type %x  xmit_type %x\n",
	   fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default) */
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
			mac_type = MULTICAST_ADDRESS;

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/
	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,

	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

	if (vlan_tx_tag_present(skb)) {
		tx_start_bd->vlan_or_ethertype =
		    cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |=
		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	if (xmit_type & XMIT_CSUM) {
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (!(xmit_type & XMIT_CSUM_TCP))
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IS_UDP;

	if (CHIP_IS_E2(bp)) {
		pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum_e2(bp,
						     skb, pbd_e2, xmit_type);
		pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
	/* Map skb linear data for DMA */
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	/* Setup the data pointer of the first BD of the packet */
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);
			bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	fp->tx_db.data.prod += nbd;

	DOORBELL(bp, fp->cid, fp->tx_db.raw);

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 */
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);

	return NETDEV_TX_OK;
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2x_set_eth_mac(bp, 1);
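
/* Request IRQs according to the interrupt mode that was enabled
 * (MSI-X, MSI or INTx). */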
static int bnx2x_setup_irqs(struct bnx2x *bp)
	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		rc = bnx2x_req_irq(bp);
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI  IRQ %d\n",
void bnx2x_free_mem_bp(struct bnx2x *bp)
	kfree(bp->msix_table);
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;

	fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);

	tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
	bp->msix_table = tbl;

	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);

	bnx2x_free_mem_bp(bp);
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
void bnx2x_tx_timeout(struct net_device *dev)
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
	struct net_device *dev = pci_get_drvdata(pdev);

		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
	bp = netdev_priv(dev);

	pci_save_state(pdev);

	if (!netif_running(dev)) {

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
int bnx2x_resume(struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata(pdev);

		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");

	pci_restore_state(pdev);

	if (!netif_running(dev)) {

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	/* Since the chip was reset, clear the FW sequence number */
	rc = bnx2x_nic_load(bp, LOAD_OPEN);