/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/netdevice.h>

#include "bnx2x.h"
/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 ****************************************************************/
/**
 * Initialize link parameters structure variables.
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
/**
 * Configure hw according to link parameters structure.
 */
void bnx2x_link_set(struct bnx2x *bp);
/**
 * Query link status.
 *
 * @return 0 - link is UP
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
/**
 * Handles link status change.
 */
void bnx2x__link_status_update(struct bnx2x *bp);
/**
 * MSI-X slowpath interrupt handler.
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
/**
 * non MSI-X interrupt handler.
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
/**
 * Send command to cnic driver.
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
/**
 * Provides cnic information for proper interrupt handling.
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
/**
 * Enable HW interrupts.
 */
void bnx2x_int_enable(struct bnx2x *bp);
/**
 * Disable HW interrupts.
 */
void bnx2x_int_disable(struct bnx2x *bp);
/**
 * Disable interrupts. This function ensures that no ISRs or
 * SP DPCs (sp_task) are running after it returns.
 *
 * @param disable_hw if true, disable HW interrupts.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
/**
 * Loads device firmware.
 */
int bnx2x_init_firmware(struct bnx2x *bp);
/**
 * Init HW blocks according to current initialization stage:
 * COMMON, PORT or FUNCTION.
 *
 * @param load_code COMMON, PORT or FUNCTION
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);
/**
 * Init driver internals.
 *
 * @param load_code COMMON, PORT or FUNCTION
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
/**
 * Allocate driver's memory.
 */
int bnx2x_alloc_mem(struct bnx2x *bp);
/**
 * Release driver's memory.
 */
void bnx2x_free_mem(struct bnx2x *bp);
/**
 * Setup eth client.
 */
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       int is_leading);
/**
 * Bring down an eth client.
 */
int bnx2x_stop_fw_client(struct bnx2x *bp,
			 struct bnx2x_client_ramrod_params *p);
/**
 * Set number of queues according to mode.
 */
void bnx2x_set_num_queues_msix(struct bnx2x *bp);
/**
 * Cleanup chip internals:
 * - Cleanup MAC configuration.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
/**
 * Acquire HW lock.
 *
 * @param resource Resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
/**
 * Release HW lock.
 *
 * @param bp       driver handle
 * @param resource Resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
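
/*
 * Typical acquire/release pairing (illustrative sketch only; the
 * resource bit shown is just an example, callers pass whichever
 * HW_LOCK_RESOURCE_* bit protects the resource they touch):
 *
 *	rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	if (rc)
 *		return rc;
 *	... access the shared resource ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */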
/**
 * Configure eth MAC address in the HW according to the value in
 * netdev->dev_addr for 57711.
 *
 * @param bp driver handle
 */
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). The function will wait until the ramrod completion
 * returns.
 *
 * @param bp  driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
/**
 * Initialize status block in FW and HW.
 *
 * @param bp        driver handle
 * @param mapping   DMA mapping of the status block
 * @param fw_sb_id
 * @param igu_sb_id
 */
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
		   u8 vf_valid, int fw_sb_id, int igu_sb_id);
/**
 * Reconfigure FW/HW according to dev->flags rx mode.
 *
 * @param dev net_device
 */
void bnx2x_set_rx_mode(struct net_device *dev);
/**
 * Configure MAC filtering rules in the FW.
 *
 * @param bp driver handle
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
/* Parity errors related */
void bnx2x_inc_load_cnt(struct bnx2x *bp);
u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp);
bool bnx2x_reset_is_done(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
/**
 * Perform statistics handling according to event.
 *
 * @param bp    driver handle
 * @param event bnx2x_stats_event
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
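
/*
 * Callers feed events from the enum into the statistics state machine,
 * e.g. (illustrative; STATS_EVENT_UPDATE is one of the events defined
 * by enum bnx2x_stats_event in the driver):
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 */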
/**
 * Handle sp events.
 *
 * @param fp     fastpath handle for the event
 * @param rr_cqe eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
/**
 * Init/halt function before/after sending
 * CLIENT_SETUP/CFC_DEL for the first/last client.
 */
int bnx2x_func_start(struct bnx2x *bp);
int bnx2x_func_stop(struct bnx2x *bp);
/**
 * Prepare ILT configurations according to current driver
 * parameters.
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	barrier(); /* status block is written to by the chip */
	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW assumes
	 * BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp,
		       BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
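
/*
 * Usage sketch (hypothetical caller): after replenishing RX BDs/SGEs,
 * the RX path publishes all three producers in one call and relies on
 * the barriers inside the helper rather than adding its own:
 *
 *	bnx2x_update_rx_prod(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod);
 */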
static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
					u8 segment, u16 index, u8 op,
					u8 update, u32 igu_addr)
{
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr);
	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
					  u8 idu_sb_id, bool is_Pf)
{
	u32 data, ctl, cnt = 100;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
	u32 sb_bit = 1 << (idu_sb_id%32);
	u32 func_encode = BP_FUNC(bp) |
			((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

	/* Not supported in BC mode */
	if (CHIP_INT_MODE_IS_BC(bp))
		return;

	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
		IGU_REGULAR_CLEANUP_SET |
		IGU_REGULAR_BCLEANUP;

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
			 data, igu_addr_data);
	REG_WR(bp, igu_addr_data, data);
	mmiowb();
	barrier();
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
			 ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();

	/* wait for clean up to finish */
	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
		msleep(20);

	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
		DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
		   "idu_sb_id %d offset %d bit %d (cnt %d)\n",
		   idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
	}
}
static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
				   u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
				    u16 index, u8 op, u8 update)
{
	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;

	bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
			     igu_addr);
}
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
				u16 index, u8 op, u8 update)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
	else {
		u8 segment;

		if (CHIP_INT_MODE_IS_BC(bp))
			segment = storm;
		else if (igu_sb_id != bp->igu_dsb_id)
			segment = IGU_SEG_ACCESS_DEF;
		else if (storm == ATTENTION_ID)
			segment = IGU_SEG_ACCESS_ATTN;
		else
			segment = IGU_SEG_ACCESS_DEF;
		bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
	}
}
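
/*
 * Dispatch note: bnx2x_ack_sb() hides the HC vs. IGU split from fast
 * path code. A NAPI handler, for example, would re-enable interrupts
 * with something like (illustrative sketch):
 *
 *	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
 *		     le16_to_cpu(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
 */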
static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	barrier();
	return result;
}
static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
	u32 result = REG_RD(bp, igu_addr);

	DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
	   result, igu_addr);

	barrier();
	return result;
}
static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	barrier();
	if (bp->common.int_block == INT_BLOCK_HC)
		return bnx2x_hc_ack_int(bp);
	else
		return bnx2x_igu_ack_int(bp);
}
/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return fp->tx_pkt_prod != fp->tx_pkt_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
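
/*
 * Worked example (hypothetical numbers): with tx_ring_size = 4078,
 * prod = 100 and cons = 50, SUB_S16() yields 50 in-flight BDs;
 * NUM_TX_RINGS is added so the "next-page" BDs are always counted as
 * used, leaving 4078 - (50 + NUM_TX_RINGS) BDs available. The TX path
 * compares this return value against the worst-case BD count of one
 * skb before stopping the queue.
 */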
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
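
/*
 * The MAX_RCQ_DESC_CNT check above skips the "next page" descriptor:
 * when the low bits of the consumer land exactly on the last entry of
 * an RCQ page, that slot holds a next-page pointer rather than a CQE,
 * so the consumer is bumped past it before being compared against
 * rx_comp_cons.
 */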
/**
 * disables tx from stack point of view
 *
 * @param bp
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
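
/*
 * Sizing note: the mask is an array of u64 words, one bit per SGE, so
 * NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT is the element count and
 * multiplying by sizeof(u64) gives the byte length passed to memset().
 * E.g. with 512 SGEs and 64-bit mask elements (a shift of 6), the mask
 * is 8 u64 words, i.e. 64 bytes (numbers here are illustrative).
 */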
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
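
/*
 * Allocation sketch: bnx2x_alloc_rx_skb()/bnx2x_alloc_rx_sge() pair
 * with bnx2x_free_rx_sge() and the skb free paths. A ring-fill loop in
 * the RX init path would look roughly like (illustrative only;
 * rx_ring_size is a hypothetical local):
 *
 *	for (i = 0; i < rx_ring_size; i++)
 *		if (bnx2x_alloc_rx_skb(bp, fp, i) < 0)
 *			break;
 */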
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
				      struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;
	}
}
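
/*
 * Ring-chaining note: each TX page ends in an eth_tx_next_bd whose
 * address points at the following page (i % NUM_TX_RINGS wraps the
 * last page back to page 0), so HW walks the BD chain as one logical
 * ring across BCM_PAGE_SIZE pages. The RX BD, SGE and RCQ rings below
 * are chained the same way.
 */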
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_RINGS; i++) {
		struct eth_rx_bd *rx_bd;

		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
		rx_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		rx_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
	}
}
static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}
static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}
static inline void __storm_memset_struct(struct bnx2x *bp,
					 u32 addr, size_t size, u32 *data)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), data[i]);
}
static inline void storm_memset_mac_filters(struct bnx2x *bp,
			struct tstorm_eth_mac_filter_config *mac_filters,
			u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_mac_filter_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
}
static inline void storm_memset_cmng(struct bnx2x *bp,
				     struct cmng_struct_per_port *cmng,
				     u8 port)
{
	size_t size = sizeof(struct cmng_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)cmng);
}
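
/*
 * __storm_memset_struct() is the generic copy primitive here: it
 * writes any host structure into storm internal memory 32 bits at a
 * time, so both wrappers above reduce to "compute the per-function or
 * per-port offset in the right BAR, then stream sizeof(struct)/4
 * words". New storm-resident structures can be mirrored the same way.
 */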
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

void bnx2x_link_report(struct bnx2x *bp);
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
int bnx2x_tx_int(struct bnx2x_fastpath *fp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);
void bnx2x_tx_timeout(struct net_device *dev);
void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
void bnx2x_netif_start(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_free_irq(struct bnx2x *bp, bool disable_only);
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);
void bnx2x_free_skbs(struct bnx2x *bp);
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
/**
 * Allocate/release memories outside main driver structure.
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
void bnx2x_free_mem_bp(struct bnx2x *bp);
#define BNX2X_FW_IP_HDR_ALIGN_PAD	2 /* FW places hdr with this padding */

#endif /* BNX2X_CMN_H */