/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/netdevice.h>

extern int num_queues;
/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */

/**
 * Initialize link parameters structure variables.
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * Configure hw according to link parameters structure.
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * @return 0 - link is UP
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * Handles link status change
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * Report link status to upper layer
 */
void bnx2x_link_report(struct bnx2x *bp);

/**
 * MSI-X slowpath interrupt handler
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * non MSI-X interrupt handler
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);

/**
 * Send command to cnic driver
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
/**
 * Provides cnic information for proper interrupt handling
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);

/**
 * Enable HW interrupts.
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * Disable interrupts. This function ensures that no
 * ISRs or SP DPCs (sp_task) are running after it returns.
 *
 * @param disable_hw if true, disable HW interrupts.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * Loads device firmware
 */
int bnx2x_init_firmware(struct bnx2x *bp);

/**
 * Init HW blocks according to current initialization stage:
 * COMMON, PORT or FUNCTION.
 *
 * @param load_code: COMMON, PORT or FUNCTION
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);

/**
 * Init driver internals:
 *
 * @param load_code COMMON, PORT or FUNCTION
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * Allocate driver's memory.
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * Release driver's memory.
 */
void bnx2x_free_mem(struct bnx2x *bp);
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       int is_leading);

/**
 * Set number of queues according to mode
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * Cleanup chip internals:
 * - Cleanup MAC configuration.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);

/**
 * @param resource Resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * @param bp driver handle
 * @param resource Resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * Configure eth MAC address in the HW according to the value in
 * netdev->dev_addr.
 *
 * @param bp driver handle
 */
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);

/**
 * Set MAC filtering configurations.
 *
 * @remarks called with netif_tx_lock from dev_mcast.c
 *
 * @param dev net_device
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * Configure MAC filtering rules in a FW.
 *
 * @param bp driver handle
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
/* Parity errors related */
void bnx2x_inc_load_cnt(struct bnx2x *bp);
u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp);
bool bnx2x_reset_is_done(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);

/**
 * Perform statistics handling according to event
 *
 * @param bp driver handle
 * @param event bnx2x_stats_event
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/**
 * Handle ramrods completion
 *
 * @param fp fastpath handle for the event
 * @param rr_cqe eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * Init/halt function before/after sending
 * CLIENT_SETUP/CFC_DEL for the first/last client.
 */
int bnx2x_func_start(struct bnx2x *bp);

/**
 * Prepare ILT configurations according to current driver
 * parameters.
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * Set power state to the requested value. Currently only D0 and
 * D3hot are supported.
 *
 * @param state D0 or D3hot
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x_fastpath *fp);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
/**
 * Fill msix_table, request vectors, update num_queues according
 * to number of available vectors
 */
int bnx2x_enable_msix(struct bnx2x *bp);

/**
 * Request msi mode from OS, update internals accordingly
 */
int bnx2x_enable_msi(struct bnx2x *bp);

int bnx2x_poll(struct napi_struct *napi, int budget);

/**
 * Allocate/release memories outside main driver structure
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * Change mtu netdev callback
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

/**
 * tx timeout netdev callback
 */
void bnx2x_tx_timeout(struct net_device *dev);

/**
 * vlan rx register netdev callback
 */
void bnx2x_vlan_rx_register(struct net_device *dev,
			    struct vlan_group *vlgrp);
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	barrier(); /* status block is written to by the chip */
	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp,
		       BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
					u8 segment, u16 index, u8 op,
					u8 update, u32 igu_addr)
{
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr);
	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
					  u8 idu_sb_id, bool is_Pf)
{
	u32 data, ctl, cnt = 100;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
	u32 sb_bit = 1 << (idu_sb_id%32);
	u32 func_encode = BP_FUNC(bp) |
			((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

	/* Not supported in BC mode */
	if (CHIP_INT_MODE_IS_BC(bp))
		return;

	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
		IGU_REGULAR_CLEANUP_SET |
		IGU_REGULAR_BCLEANUP;

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   data, igu_addr_data);
	REG_WR(bp, igu_addr_data, data);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();

	/* wait for clean up to finish */
	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
		msleep(20);

	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
		DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
		   "idu_sb_id %d offset %d bit %d (cnt %d)\n",
		   idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
	}
}
static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
				   u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
				    u16 index, u8 op, u8 update)
{
	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;

	bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
			     igu_addr);
}
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
				u16 index, u8 op, u8 update)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
	else {
		u8 segment;

		if (CHIP_INT_MODE_IS_BC(bp))
			segment = storm;
		else if (igu_sb_id != bp->igu_dsb_id)
			segment = IGU_SEG_ACCESS_DEF;
		else if (storm == ATTENTION_ID)
			segment = IGU_SEG_ACCESS_ATTN;
		else
			segment = IGU_SEG_ACCESS_DEF;
		bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
	}
}
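
/*
 * Illustrative only (not part of the original file): a hedged sketch of how
 * an interrupt handler might acknowledge the default status block and
 * re-enable the interrupt line via bnx2x_ack_sb(); the storm id and index
 * used here are assumptions made for the example.
 *
 *	u16 idx = <index of the last processed default status block entry>;
 *
 *	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, idx, IGU_INT_ENABLE, 1);
 */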
static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
	u32 result = REG_RD(bp, igu_addr);

	DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
	   result, igu_addr);

	return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		return bnx2x_hc_ack_int(bp);
	else
		return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return fp->tx_pkt_prod != fp->tx_pkt_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
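
/*
 * Illustrative only (not part of the original file): a hedged sketch of how
 * a transmit routine might use bnx2x_tx_avail() for flow control; the
 * (MAX_SKB_FRAGS + 3) BD margin and the txq variable are assumptions made
 * for the example.
 *
 *	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
 *		netif_tx_stop_queue(txq);
 *		return NETDEV_TX_BUSY;
 *	}
 */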
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
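
/*
 * Illustrative only (not part of the original file): a hedged sketch of how
 * a NAPI poll routine might combine the helpers above; the work_done/budget
 * bookkeeping is simplified for the example.
 *
 *	if (bnx2x_has_tx_work(fp))
 *		bnx2x_tx_int(fp);
 *
 *	if (bnx2x_has_rx_work(fp))
 *		work_done += bnx2x_rx_int(fp, budget - work_done);
 */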
/**
 * disables tx from stack point of view
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}
static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;
	} else if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
}
static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	return num_queues ?
		min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
		min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
}
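
/*
 * Illustrative only (not part of the original file): with the num_queues
 * module parameter left at its default of 0, the helper above falls back to
 * the CPU count, e.g. on an 8-core host with a 16-queue chip limit:
 *
 *	num_queues == 0  ->  min(num_online_cpus(), BNX2X_MAX_QUEUES(bp))
 *	                     == min(8, 16) == 8 queues
 */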
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
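
/*
 * Illustrative only (not part of the original file): a hedged sketch of how
 * an rx-ring fill loop might use bnx2x_alloc_rx_skb(); the ring bound and
 * error handling are assumptions made for the example.
 *
 *	for (i = 0; i < bp->rx_ring_size; i++) {
 *		if (bnx2x_alloc_rx_skb(bp, fp, i) < 0) {
 *			BNX2X_ERR("only %d rx skbs allocated for queue[%d]\n",
 *				  i, fp->index);
 *			break;
 *		}
 *	}
 */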
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
				      struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;
	}
}
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_RINGS; i++) {
		struct eth_rx_bd *rx_bd;

		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
		rx_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		rx_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
	}
}

static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}
static inline void __storm_memset_struct(struct bnx2x *bp,
					 u32 addr, size_t size, u32 *data)
{
	int i;

	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), data[i]);
}

static inline void storm_memset_mac_filters(struct bnx2x *bp,
			struct tstorm_eth_mac_filter_config *mac_filters,
			u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_mac_filter_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
}

static inline void storm_memset_cmng(struct bnx2x *bp,
				     struct cmng_struct_per_port *cmng,
				     u8 port)
{
	size_t size = sizeof(struct cmng_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)cmng);
}
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

#endif /* BNX2X_CMN_H */