/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright 2011 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov
 *
 */
#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI		16
/**** Exe Queue interfaces ****/

/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @o:		pointer to the object
 * @exe_len:	length of execution chunk
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
					struct bnx2x_exe_queue_obj *o,
					int exe_len,
					union bnx2x_qable_obj *owner,
					exe_q_validate validate,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	memset(o, 0, sizeof(*o));

	INIT_LIST_HEAD(&o->exe_queue);
	INIT_LIST_HEAD(&o->pending_comp);

	spin_lock_init(&o->lock);

	o->exe_chunk_len = exe_len;
	o->get		 = get;

	/* Owner specific callbacks */
	o->validate	 = validate;
	o->optimize	 = optimize;
	o->execute	 = exec;
	o->owner	 = owner;

	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
			 "length of %d\n", exe_len);
}
static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}
static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;
	int cnt = 0;

	spin_lock_bh(&o->lock);

	list_for_each_entry(elem, &o->exe_queue, link)
		cnt++;

	spin_unlock_bh(&o->lock);

	return cnt;
}
/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp:		driver handle
 * @o:		queue
 * @elem:	new command to add
 * @restore:	true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
				      struct bnx2x_exe_queue_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      bool restore)
{
	int rc;

	spin_lock_bh(&o->lock);

	if (!restore) {
		/* Try to cancel this element queue */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			BNX2X_ERR("Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock_bh(&o->lock);

	return 0;

free_and_exit:
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock_bh(&o->lock);

	return rc;
}
static inline void __bnx2x_exe_queue_reset_pending(
	struct bnx2x *bp,
	struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;

	while (!list_empty(&o->pending_comp)) {
		elem = list_first_entry(&o->pending_comp,
					struct bnx2x_exeq_elem, link);

		list_del(&elem->link);
		bnx2x_exe_queue_free_elem(bp, elem);
	}
}

static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
						 struct bnx2x_exe_queue_obj *o)
{
	spin_lock_bh(&o->lock);

	__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
}
/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:			driver handle
 * @o:			queue
 * @ramrod_flags:	flags
 *
 * (Atomicity is ensured using the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	spin_lock_bh(&o->lock);

	/*
	 * Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the
	 * FW which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
					 "resetting pending_comp\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			spin_unlock_bh(&o->lock);
			return 1;
		}
	}

	/*
	 * Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/*
			 * Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_del(&elem->link);
			list_add_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len) {
		spin_unlock_bh(&o->lock);
		return 0;
	}

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/*
		 * In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/*
		 * If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
	return rc;
}
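/*
 * Illustrative usage sketch (not part of the driver): the intended life
 * cycle of the execution queue, assuming a hypothetical caller that owns a
 * fully initialized object. Commands are queued with bnx2x_exe_queue_add(),
 * submitted one chunk at a time with bnx2x_exe_queue_step(), and the
 * completion path re-invokes the step to drain the queue:
 *
 *	unsigned long flags = 0;
 *
 *	rc = bnx2x_exe_queue_add(bp, &obj->exe_queue, elem, false);
 *	if (!rc)
 *		rc = bnx2x_exe_queue_step(bp, &obj->exe_queue, &flags);
 *	// rc > 0: a chunk was submitted, wait for the FW completion
 *
 *	// ...later, from the completion handler...
 *	bnx2x_exe_queue_step(bp, &obj->exe_queue, &flags);
 */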
static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && list_empty(&o->pending_comp);
}
static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
	struct bnx2x *bp)
{
	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}
/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
	return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}
/**
 * bnx2x_state_wait - wait until the given bit(state) is cleared
 *
 * @bp:		device handle
 * @state:	state which is to be cleared
 * @pstate:	state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 1000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}
/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get(vp, 1);
}

static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->get(mp, 1))
		return false;

	if (!vp->get(vp, 1)) {
		mp->put(mp, 1);
		return false;
	}

	return true;
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put(vp, 1);
}

static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->put(mp, 1))
		return false;

	if (!vp->put(vp, 1)) {
		mp->get(mp, 1);
		return false;
	}

	return true;
}
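/*
 * Note on the callbacks above (illustrative): callers never touch the
 * credit pools directly but go through the object, and a VLAN-MAC pair
 * consumes a credit from *both* pools, which is why the combined get/put
 * roll back the first pool when the second one fails:
 *
 *	if (!o->get_credit(o))		// reserve CAM credit(s)
 *		return -EINVAL;		// pool(s) exhausted
 *	...
 *	o->put_credit(o);		// release on the error path
 */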
/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
			       union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	if (!is_valid_ether_addr(data->mac.mac))
		return -EINVAL;

	/* Check if a requested MAC already exists */
	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
				union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
				union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return -EEXIST;

	return 0;
}
/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
			    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
				 union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return pos;

	return NULL;
}
/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
			     struct bnx2x_vlan_mac_obj *dst_o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(src_o, data);

	/* check if configuration can be added */
	rc = dst_o->check_add(dst_o, data);

	/* If this classification can not be added (is already set)
	 * or can't be deleted - return an error.
	 */
	if (rc || !pos)
		return false;

	return true;
}

static bool bnx2x_check_move_always_err(
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}
static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	u8 rx_tx_flag = 0;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}
/* LLH CAM line allocations */
enum {
	LLH_CAM_ISCSI_ETH_LINE = 0,
	LLH_CAM_ETH_LINE,
	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
};

static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
			bool add, unsigned char *dev_addr, int index)
{
	u32 wb_data[2];
	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
			 (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register */
		reg_offset += 8*index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8)  | dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8)  | dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
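/*
 * Worked example (illustrative): for dev_addr = 00:11:22:33:44:55 the two
 * write-back words above are packed as
 *
 *	wb_data[0] = 0x22334455;	// dev_addr[2..5]
 *	wb_data[1] = 0x00000011;	// dev_addr[0..1]
 *
 * i.e. the low word carries the four least significant MAC bytes and the
 * high word the two most significant ones.
 */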
/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue for which we want to configure this rule
 * @add:	if true the command is an ADD command, DEL otherwise
 * @opcode:	CLASSIFY_RULE_OPCODE_XXX
 * @hdr:	pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
	struct eth_classify_cmd_header *hdr)
{
	struct bnx2x_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |=
		bnx2x_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:	connection id
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @hdr:	pointer to header to setup
 * @rule_cnt:	number of rules in this ramrod data
 *
 * currently we always configure one rule and echo field to contain a CID and an
 * opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
	hdr->rule_cnt = (u8)rule_cnt;
}
/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/*
	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of 8 per function MAC
	 * entries in the LLH register. There are also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for "
			 "Queue %d\n", (add ? "add" : "delete"),
	   BNX2X_MAC_PRN_LIST(mac), raw->cl_id);

	/* Set a MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set a MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @cam_offset:	offset in cam memory
 * @hdr:	pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct bnx2x_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (u8)cam_offset;
	hdr->client_id = 0xff;
	hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
}
static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
	struct bnx2x_raw_obj *r = &o->raw;
	u32 cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = cpu_to_le16(vlan_id);

	if (add) {
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
		SET_FLAG(cfg_entry->flags,
			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

		/* Set a MAC in a ramrod data */
		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
}
static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
	struct bnx2x_raw_obj *raw = &o->raw;

	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
					 &config->hdr);
	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
					 cfg_entry);

	DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n",
			 (add ? "setting" : "clearing"),
	   BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset);
}
/**
 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}
static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
			 vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = cpu_to_le16(vlan);

	/* MOVE: Add a rule that will add this VLAN to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_VLAN,
					      &rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = cpu_to_le16(vlan);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
				      struct bnx2x_vlan_mac_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = cpu_to_le16(vlan);
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);

	/* MOVE: Add a rule that will add this pair to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_PAIR,
					      &rule_entry->pair.header);

		/* Set a VLAN itself */
		rule_entry->pair.vlan = cpu_to_le16(vlan);
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
/**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset:	cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
				       struct bnx2x_vlan_mac_obj *o,
				       struct bnx2x_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}
981 #define list_next_entry(pos, member) \
982 list_entry((pos)->member.next, typeof(*(pos)), member)
/**
 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @bp:		device handle
 * @p:		command parameters
 * @ppos:	pointer to the cookie
 *
 * reconfigure next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
 * into account
 *
 * pointer to the cookie - that should be given back in the next call to make
 * function handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If returned *ppos == NULL this means that the last element has been
 * handled.
 *
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_ramrod_params *p,
				  struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}
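/*
 * Usage sketch (illustrative, not part of the driver): a caller restoring
 * a whole registry iterates with the cookie until it comes back NULL:
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *		if (rc < 0)
 *			break;		// current operation failed
 *	} while (pos);
 */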
/*
 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
			      sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
			      sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_mac_ramrod_data *data =
		&elem->cmd_data.vlan_mac.u.vlan_mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			      sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}
/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
				 "current registry state\n");
		return rc;
	}

	/*
	 * Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
		return -EEXIST;
	}

	/*
	 * TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	    o->get_credit(o)))
		return -EINVAL;

	return 0;
}
/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp:		device handle
 * @qo:		qable object to check
 * @elem:	element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return a BNX2X_EXIST.
	 */
	pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
				 "current registry state\n");
		return -EEXIST;
	}

	/*
	 * Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		BNX2X_ERR("There is a pending MOVE command already\n");
		return -EINVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
		return -EEXIST;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	    o->put_credit(o))) {
		BNX2X_ERR("Failed to return a credit\n");
		return -EINVAL;
	}

	return 0;
}
/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp:		device handle
 * @qo:		qable object to check (source)
 * @elem:	element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/*
	 * Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
				 "current registry state\n");
		return -EINVAL;
	}

	/*
	 * Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source "
			  "queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the "
			  "destination queue already\n");
		return -EINVAL;
	}

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	    dest_o->get_credit(dest_o)))
		return -EINVAL;

	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	    src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}
static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		return -EINVAL;
	}
}
/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 *
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(bp, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 1000);
		else
			return 0;
	}

	return -EBUSY;
}
/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp:			device handle
 * @o:			bnx2x_vlan_mac_obj
 * @cqe:		completion element
 * @ramrod_flags:	if RAMROD_CONT is set schedule next execution chunk
 *
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}
/**
 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem query, *pos;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

	memcpy(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
		break;
	case BNX2X_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {

		/* Return the credit of the optimized command */
		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
				BNX2X_ERR("Failed to return the credit for the "
					  "optimized ADD command\n");
				return -EINVAL;
			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
				BNX2X_ERR("Failed to recover the credit from "
					  "the optimized DEL command\n");
				return -EINVAL;
			}
		}

		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
		   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		   "ADD" : "DEL");

		list_del(&pos->link);
		bnx2x_exe_queue_free_elem(bp, pos);
		return 1;
	}

	return 0;
}
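/*
 * Illustrative example of the optimization above (not part of the driver):
 *
 *	queue:	[ DEL MAC A ]		// queued, not yet executed
 *	new:	ADD MAC A		// optimize finds the pending DEL
 *	result:	[ ]			// DEL removed, credit rebalanced;
 *					// 1 is returned, so the new ADD is
 *					// freed by bnx2x_exe_queue_add()
 *					// without ever reaching the FW
 */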
/**
 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 * @elem:	execution queue element
 * @restore:	true if this is a restore flow
 * @re:		where to return the registry element
 *
 * prepare a registry element according to the current command request.
 */
static inline int bnx2x_vlan_mac_get_registry_elem(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o,
	struct bnx2x_exeq_elem *elem,
	bool restore,
	struct bnx2x_vlan_mac_registry_elem **re)
{
	int cmd = elem->cmd_data.vlan_mac.cmd;
	struct bnx2x_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element if needed. */
	if (!restore &&
	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
		if (!reg_elem)
			return -ENOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/*
			 * This should never happen, because we have checked the
			 * CAM availability in the 'validate'.
			 */
			WARN_ON(1);
			kfree(reg_elem);
			return -EINVAL;
		}

		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
			  sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
			elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else /* DEL, RESTORE */
		reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

	*re = reg_elem;
	return 0;
}
/**
 * bnx2x_execute_vlan_mac - execute vlan mac command
 *
 * @bp:			device handle
 * @qo:			bnx2x_qable_obj
 * @exe_chunk:		chunk of commands to execute
 * @ramrod_flags:	execution flags
 *
 * go and send a ramrod!
 */
static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
				  union bnx2x_qable_obj *qo,
				  struct list_head *exe_chunk,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc, idx = 0;
	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
	struct bnx2x_vlan_mac_registry_elem *reg_elem;
	int cmd;

	/*
	 * If DRIVER_ONLY execution is requested, cleanup a registry
	 * and exit. Otherwise send a ramrod to FW.
	 */
	if (!drv_only) {
		WARN_ON(r->check_pending(r));

		/* Set pending */
		r->set_pending(r);

		/* Fill the ramrod data */
		list_for_each_entry(elem, exe_chunk, link) {
			cmd = elem->cmd_data.vlan_mac.cmd;
			/*
			 * We will add to the target object in MOVE command, so
			 * change the object for a CAM search.
			 */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			WARN_ON(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
			    (cmd == BNX2X_VLAN_MAC_MOVE)))
				list_add(&reg_elem->link, &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(bp, o, elem, idx,
					reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/*
		 * No need for an explicit memory barrier here as long as we
		 * would need to ensure the ordering of writing to the SPQ
		 * element and updating of the SPQ producer, which involves a
		 * memory read, and we will have to put a full memory barrier
		 * there (inside bnx2x_sp_post()).
		 */

		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
				   U64_HI(r->rdata_mapping),
				   U64_LO(r->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

			WARN_ON(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			list_del(&reg_elem->link);
			kfree(reg_elem);
		}
	}

	if (!drv_only)
		return 1;
	else
		return 0;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == BNX2X_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				list_del(&reg_elem->link);
				kfree(reg_elem);
			}
		}
	}

	return rc;
}
static inline int bnx2x_vlan_mac_push_new_cmd(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);

	/* Allocate the execution queue element */
	elem = bnx2x_exe_queue_alloc_elem(bp);
	if (!elem)
		return -ENOMEM;

	/* Set the command 'length' */
	switch (p->user_req.cmd) {
	case BNX2X_VLAN_MAC_MOVE:
		elem->cmd_len = 2;
		break;
	default:
		elem->cmd_len = 1;
	}

	/* Fill the object specific info */
	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));

	/* Try to add a new command to the pending list */
	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
}
/**
 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @bp:		device handle
 * @p:		command parameters
 *
 */
int bnx2x_config_vlan_mac(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	int rc = 0;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
	struct bnx2x_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
		if (rc)
			return rc;
	}

	/*
	 * If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		rc = 1;

	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
				 "clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/*
	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then user want to wait until the last command is done.
	 */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/*
		 * Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(bp, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
						  ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return 0;
	}

	return rc;
}
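/*
 * Usage sketch (illustrative only; field names as declared in bnx2x_sp.h):
 * a typical synchronous ADD of a MAC through this API:
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *	rc = bnx2x_config_vlan_mac(bp, &p);
 */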
/**
 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @bp:			device handle
 * @o:			vlan_mac object
 * @vlan_mac_flags:	flags spec to match against
 * @ramrod_flags:	execution flags to be used for this deletion
 *
 * Returns 0 if the last operation has completed successfully and there are no
 * more elements left, a positive value if the last operation has completed
 * successfully and there are more previously configured elements, and a
 * negative value if the current operation has failed.
 */
static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
	int rc = 0;
	struct bnx2x_vlan_mac_ramrod_params p;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;

	/* Clear pending commands first */

	spin_lock_bh(&exeq->lock);

	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
		    *vlan_mac_flags)
			list_del(&exeq_pos->link);
	}

	spin_unlock_bh(&exeq->lock);

	/* Prepare a command request */
	memset(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	/*
	 * Add all but the last VLAN-MAC to the execution queue without actually
	 * executing anything.
	 */
	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
	__clear_bit(RAMROD_CONT, &p.ramrod_flags);

	list_for_each_entry(pos, &o->head, link) {
		if (pos->vlan_mac_flags == *vlan_mac_flags) {
			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
			rc = bnx2x_config_vlan_mac(bp, &p);
			if (rc < 0) {
				BNX2X_ERR("Failed to add a new DEL command\n");
				return rc;
			}
		}
	}

	p.ramrod_flags = *ramrod_flags;
	__set_bit(RAMROD_CONT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}
static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
	unsigned long *pstate, bnx2x_obj_type type)
{
	raw->func_id = func_id;
	raw->cid = cid;
	raw->cl_id = cl_id;
	raw->rdata = rdata;
	raw->rdata_mapping = rdata_mapping;
	raw->state = state;
	raw->pstate = pstate;
	raw->obj_type = type;
	raw->check_pending = bnx2x_raw_check_pending;
	raw->clear_pending = bnx2x_raw_clear_pending;
	raw->set_pending = bnx2x_raw_set_pending;
	raw->wait_comp = bnx2x_raw_wait;
}
static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
	int state, unsigned long *pstate, bnx2x_obj_type type,
	struct bnx2x_credit_pool_obj *macs_pool,
	struct bnx2x_credit_pool_obj *vlans_pool)
{
	INIT_LIST_HEAD(&o->head);

	o->macs_pool = macs_pool;
	o->vlans_pool = vlans_pool;

	o->delete_all = bnx2x_vlan_mac_del_all;
	o->restore = bnx2x_vlan_mac_restore;
	o->complete = bnx2x_complete_vlan_mac;
	o->wait = bnx2x_wait_vlan_mac;

	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
			   state, pstate, type);
}
void bnx2x_init_mac_obj(struct bnx2x *bp,
			struct bnx2x_vlan_mac_obj *mac_obj,
			u8 cl_id, u32 cid, u8 func_id, void *rdata,
			dma_addr_t rdata_mapping, int state,
			unsigned long *pstate, bnx2x_obj_type type,
			struct bnx2x_credit_pool_obj *macs_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;

	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, NULL);

	/* CAM credit pool handling */
	mac_obj->get_credit = bnx2x_get_credit_mac;
	mac_obj->put_credit = bnx2x_put_credit_mac;
	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;

	if (CHIP_IS_E1x(bp)) {
		mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
		mac_obj->check_del = bnx2x_check_mac_del;
		mac_obj->check_add = bnx2x_check_mac_add;
		mac_obj->check_move = bnx2x_check_move_always_err;
		mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, 1, qable_obj,
				     bnx2x_validate_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
	} else {
		mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
		mac_obj->check_del = bnx2x_check_mac_del;
		mac_obj->check_add = bnx2x_check_mac_add;
		mac_obj->check_move = bnx2x_check_move;
		mac_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
	}
}
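/*
 * Design note (summarizing the two branches above): the chunk length passed
 * to bnx2x_exe_queue_init() is 1 on 57710/57711, where each ramrod carries a
 * single CAM update, and CLASSIFY_RULES_COUNT on newer chips, where one
 * classification ramrod can carry several rules; bnx2x_exe_queue_step() uses
 * this value to size every execution chunk.
 */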
void bnx2x_init_vlan_obj(struct bnx2x *bp,
			 struct bnx2x_vlan_mac_obj *vlan_obj,
			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
			 dma_addr_t rdata_mapping, int state,
			 unsigned long *pstate, bnx2x_obj_type type,
			 struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;

	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type, NULL,
				   vlans_pool);

	vlan_obj->get_credit = bnx2x_get_credit_vlan;
	vlan_obj->put_credit = bnx2x_put_credit_vlan;
	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Do not support chips others than E2 and newer\n");
		BUG();
	} else {
		vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
		vlan_obj->check_del = bnx2x_check_vlan_del;
		vlan_obj->check_add = bnx2x_check_vlan_add;
		vlan_obj->check_move = bnx2x_check_move;
		vlan_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan);
	}
}
void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
			     dma_addr_t rdata_mapping, int state,
			     unsigned long *pstate, bnx2x_obj_type type,
			     struct bnx2x_credit_pool_obj *macs_pool,
			     struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj =
		(union bnx2x_qable_obj *)vlan_mac_obj;

	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, vlans_pool);

	/* CAM pool handling */
	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
	/*
	 * CAM offset is relevant for 57710 and 57711 chips only which have a
	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
	 * will be taken from MACs' pool object only.
	 */
	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;

	if (CHIP_IS_E1(bp)) {
		BNX2X_ERR("Do not support chips others than E2\n");
		BUG();
	} else if (CHIP_IS_E1H(bp)) {
		vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
		vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
		vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
		vlan_mac_obj->check_move = bnx2x_check_move_always_err;
		vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_mac_obj->exe_queue, 1, qable_obj,
				     bnx2x_validate_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan_mac);
	} else {
		vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
		vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
		vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
		vlan_mac_obj->check_move = bnx2x_check_move;
		vlan_mac_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_mac_obj->exe_queue,
				     CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan_mac);
	}
}
/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static inline void __storm_memset_mac_filters(struct bnx2x *bp,
			struct tstorm_eth_mac_filter_config *mac_filters,
			u16 pf_id)
{
	size_t size = sizeof(struct tstorm_eth_mac_filter_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);

	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
}
static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
				 struct bnx2x_rx_mode_ramrod_params *p)
{
	/* update the bp MAC filter structure */
	u32 mask = (1 << p->cl_id);

	struct tstorm_eth_mac_filter_config *mac_filters =
		(struct tstorm_eth_mac_filter_config *)p->rdata;

	/* initial setting is drop-all */
	u8 drop_all_ucast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	/* In e1x we only take into account the rx accept flags since tx
	 * switching isn't enabled. */
	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
		/* accept matched ucast */
		drop_all_ucast = 0;

	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
		/* accept (all) bcast */
		accp_all_bcast = 1;
	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
		/* accept unmatched unicasts */
		unmatched_unicast = 1;

	mac_filters->ucast_drop_all = drop_all_ucast ?
		mac_filters->ucast_drop_all | mask :
		mac_filters->ucast_drop_all & ~mask;

	mac_filters->mcast_drop_all = drop_all_mcast ?
		mac_filters->mcast_drop_all | mask :
		mac_filters->mcast_drop_all & ~mask;

	mac_filters->ucast_accept_all = accp_all_ucast ?
		mac_filters->ucast_accept_all | mask :
		mac_filters->ucast_accept_all & ~mask;

	mac_filters->mcast_accept_all = accp_all_mcast ?
		mac_filters->mcast_accept_all | mask :
		mac_filters->mcast_accept_all & ~mask;

	mac_filters->bcast_accept_all = accp_all_bcast ?
		mac_filters->bcast_accept_all | mask :
		mac_filters->bcast_accept_all & ~mask;

	mac_filters->unmatched_unicast = unmatched_unicast ?
		mac_filters->unmatched_unicast | mask :
		mac_filters->unmatched_unicast & ~mask;

	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
	   mac_filters->ucast_drop_all,
	   mac_filters->mcast_drop_all,
	   mac_filters->ucast_accept_all,
	   mac_filters->mcast_accept_all,
	   mac_filters->bcast_accept_all);

	/* write the MAC filter structure*/
	__storm_memset_mac_filters(bp, mac_filters, p->func_id);

	/* The operation is completed */
	clear_bit(p->state, p->pstate);
	smp_mb__after_clear_bit();

	return 0;
}
/* Setup ramrod data */
static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
				struct eth_classify_header *hdr,
				u8 rule_cnt)
{
	hdr->echo = cid;
	hdr->rule_cnt = rule_cnt;
}
static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
				unsigned long accept_flags,
				struct eth_filter_rules_cmd *cmd,
				bool clear_accept_all)
{
	u16 state;

	/* start with 'drop-all' */
	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

	if (accept_flags) {
		if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;

		if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

		if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
		}

		if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
			state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
		}
		if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
			state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;

		if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
		}
		if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
			state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
	}

	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
	if (clear_accept_all) {
		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
	}

	cmd->state = cpu_to_le16(state);
}
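/*
 * Worked example (illustrative): a promiscuous-mode caller typically sets
 * BNX2X_ACCEPT_ALL_UNICAST, BNX2X_ACCEPT_ALL_MULTICAST and
 * BNX2X_ACCEPT_BROADCAST; starting from the drop-all baseline the function
 * above then produces
 *
 *	state = ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL |
 *		ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL |
 *		ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
 *
 * with both DROP_ALL bits cleared, before the cpu_to_le16() store.
 */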
static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
				struct bnx2x_rx_mode_ramrod_params *p)
{
	struct eth_filter_rules_ramrod_data *data = p->rdata;
	int rc;
	u8 rule_idx = 0;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* Setup ramrod data */

	/* Tx (internal switching) */
	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_TX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
			&(data->rules[rule_idx++]), false);
	}

	/* Rx */
	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_RX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
			&(data->rules[rule_idx++]), false);
	}

	/*
	 * If FCoE Queue configuration has been requested configure the Rx and
	 * internal switching modes for this queue in separate rules.
	 *
	 * FCoE queue should never be set to ACCEPT_ALL packets of any sort:
	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
	 */
	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
		/* Tx (internal switching) */
		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
				ETH_FILTER_RULES_CMD_TX_CMD;

			bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
						       &(data->rules[rule_idx++]),
						       true);
		}

		/* Rx */
		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
				ETH_FILTER_RULES_CMD_RX_CMD;

			bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
						       &(data->rules[rule_idx++]),
						       true);
		}
	}

	/*
	 * Set the ramrod header (most importantly - number of rules to
	 * configure).
	 */
	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);

	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
			 "tx_accept_flags 0x%lx\n",
	   data->header.rule_cnt, p->rx_accept_flags,
	   p->tx_accept_flags);

	/*
	 * No need for an explicit memory barrier here as long as we would
	 * need to ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer, which involves a memory
	 * read, and we will have to put a full memory barrier there
	 * (inside bnx2x_sp_post()).
	 */

	/* Send a ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
			   U64_HI(p->rdata_mapping),
			   U64_LO(p->rdata_mapping),
			   ETH_CONNECTION_TYPE);
	if (rc)
		return rc;

	/* Ramrod completion is pending */
	return 1;
}
2258 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x
*bp
,
2259 struct bnx2x_rx_mode_ramrod_params
*p
)
2261 return bnx2x_state_wait(bp
, p
->state
, p
->pstate
);
2264 static int bnx2x_empty_rx_mode_wait(struct bnx2x
*bp
,
2265 struct bnx2x_rx_mode_ramrod_params
*p
)
2271 int bnx2x_config_rx_mode(struct bnx2x
*bp
,
2272 struct bnx2x_rx_mode_ramrod_params
*p
)
2276 /* Configure the new classification in the chip */
2277 rc
= p
->rx_mode_obj
->config_rx_mode(bp
, p
);
2281 /* Wait for a ramrod completion if was requested */
2282 if (test_bit(RAMROD_COMP_WAIT
, &p
->ramrod_flags
)) {
2283 rc
= p
->rx_mode_obj
->wait_comp(bp
, p
);
2291 void bnx2x_init_rx_mode_obj(struct bnx2x
*bp
,
2292 struct bnx2x_rx_mode_obj
*o
)
2294 if (CHIP_IS_E1x(bp
)) {
2295 o
->wait_comp
= bnx2x_empty_rx_mode_wait
;
2296 o
->config_rx_mode
= bnx2x_set_rx_mode_e1x
;
2298 o
->wait_comp
= bnx2x_wait_rx_mode_comp_e2
;
2299 o
->config_rx_mode
= bnx2x_set_rx_mode_e2
;
2303 /********************* Multicast verbs: SET, CLEAR ****************************/
2304 static inline u8
bnx2x_mcast_bin_from_mac(u8
*mac
)
2306 return (crc32c_le(0, mac
, ETH_ALEN
) >> 24) & 0xff;
2309 struct bnx2x_mcast_mac_elem
{
2310 struct list_head link
;
2312 u8 pad
[2]; /* For a natural alignment of the following buffer */
2315 struct bnx2x_pending_mcast_cmd
{
2316 struct list_head link
;
2317 int type
; /* BNX2X_MCAST_CMD_X */
2319 struct list_head macs_head
;
2320 u32 macs_num
; /* Needed for DEL command */
2321 int next_bin
; /* Needed for RESTORE flow with aprox match */
2324 bool done
; /* set to true, when the command has been handled,
2325 * practically used in 57712 handling only, where one pending
2326 * command may be handled in a few operations. As long as for
2327 * other chips every operation handling is completed in a
2328 * single ramrod, there is no need to utilize this field.
2332 static int bnx2x_mcast_wait(struct bnx2x
*bp
,
2333 struct bnx2x_mcast_obj
*o
)
2335 if (bnx2x_state_wait(bp
, o
->sched_state
, o
->raw
.pstate
) ||
2336 o
->raw
.wait_comp(bp
, &o
->raw
))
2342 static int bnx2x_mcast_enqueue_cmd(struct bnx2x
*bp
,
2343 struct bnx2x_mcast_obj
*o
,
2344 struct bnx2x_mcast_ramrod_params
*p
,
2348 struct bnx2x_pending_mcast_cmd
*new_cmd
;
2349 struct bnx2x_mcast_mac_elem
*cur_mac
= NULL
;
2350 struct bnx2x_mcast_list_elem
*pos
;
2351 int macs_list_len
= ((cmd
== BNX2X_MCAST_CMD_ADD
) ?
2352 p
->mcast_list_len
: 0);
2354 /* If the command is empty ("handle pending commands only"), break */
2355 if (!p
->mcast_list_len
)
2358 total_sz
= sizeof(*new_cmd
) +
2359 macs_list_len
* sizeof(struct bnx2x_mcast_mac_elem
);
2361 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2362 new_cmd
= kzalloc(total_sz
, GFP_ATOMIC
);
2367 DP(BNX2X_MSG_SP
, "About to enqueue a new %d command. "
2368 "macs_list_len=%d\n", cmd
, macs_list_len
);
2370 INIT_LIST_HEAD(&new_cmd
->data
.macs_head
);
2372 new_cmd
->type
= cmd
;
2373 new_cmd
->done
= false;
2376 case BNX2X_MCAST_CMD_ADD
:
2377 cur_mac
= (struct bnx2x_mcast_mac_elem
*)
2378 ((u8
*)new_cmd
+ sizeof(*new_cmd
));
2380 /* Push the MACs of the current command into the pendig command
2383 list_for_each_entry(pos
, &p
->mcast_list
, link
) {
2384 memcpy(cur_mac
->mac
, pos
->mac
, ETH_ALEN
);
2385 list_add_tail(&cur_mac
->link
, &new_cmd
->data
.macs_head
);
2391 case BNX2X_MCAST_CMD_DEL
:
2392 new_cmd
->data
.macs_num
= p
->mcast_list_len
;
2395 case BNX2X_MCAST_CMD_RESTORE
:
2396 new_cmd
->data
.next_bin
= 0;
2400 BNX2X_ERR("Unknown command: %d\n", cmd
);
2404 /* Push the new pending command to the tail of the pending list: FIFO */
2405 list_add_tail(&new_cmd
->link
, &o
->pending_cmds_head
);
2413 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2416 * @last: index to start looking from (including)
2418 * returns the next found (set) bin or a negative value if none is found.
2420 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj
*o
, int last
)
2422 int i
, j
, inner_start
= last
% BIT_VEC64_ELEM_SZ
;
2424 for (i
= last
/ BIT_VEC64_ELEM_SZ
; i
< BNX2X_MCAST_VEC_SZ
; i
++) {
2425 if (o
->registry
.aprox_match
.vec
[i
])
2426 for (j
= inner_start
; j
< BIT_VEC64_ELEM_SZ
; j
++) {
2427 int cur_bit
= j
+ BIT_VEC64_ELEM_SZ
* i
;
2428 if (BIT_VEC64_TEST_BIT(o
->registry
.aprox_match
.
2441 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2445 * returns the index of the found bin or -1 if none is found
2447 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj
*o
)
2449 int cur_bit
= bnx2x_mcast_get_next_bin(o
, 0);
2452 BIT_VEC64_CLEAR_BIT(o
->registry
.aprox_match
.vec
, cur_bit
);
2457 static inline u8
bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj
*o
)
2459 struct bnx2x_raw_obj
*raw
= &o
->raw
;
2462 if ((raw
->obj_type
== BNX2X_OBJ_TYPE_TX
) ||
2463 (raw
->obj_type
== BNX2X_OBJ_TYPE_RX_TX
))
2464 rx_tx_flag
|= ETH_MULTICAST_RULES_CMD_TX_CMD
;
2466 if ((raw
->obj_type
== BNX2X_OBJ_TYPE_RX
) ||
2467 (raw
->obj_type
== BNX2X_OBJ_TYPE_RX_TX
))
2468 rx_tx_flag
|= ETH_MULTICAST_RULES_CMD_RX_CMD
;
2473 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x
*bp
,
2474 struct bnx2x_mcast_obj
*o
, int idx
,
2475 union bnx2x_mcast_config_data
*cfg_data
,
2478 struct bnx2x_raw_obj
*r
= &o
->raw
;
2479 struct eth_multicast_rules_ramrod_data
*data
=
2480 (struct eth_multicast_rules_ramrod_data
*)(r
->rdata
);
2481 u8 func_id
= r
->func_id
;
2482 u8 rx_tx_add_flag
= bnx2x_mcast_get_rx_tx_flag(o
);
2485 if ((cmd
== BNX2X_MCAST_CMD_ADD
) || (cmd
== BNX2X_MCAST_CMD_RESTORE
))
2486 rx_tx_add_flag
|= ETH_MULTICAST_RULES_CMD_IS_ADD
;
2488 data
->rules
[idx
].cmd_general_data
|= rx_tx_add_flag
;
2490 /* Get a bin and update a bins' vector */
2492 case BNX2X_MCAST_CMD_ADD
:
2493 bin
= bnx2x_mcast_bin_from_mac(cfg_data
->mac
);
2494 BIT_VEC64_SET_BIT(o
->registry
.aprox_match
.vec
, bin
);
2497 case BNX2X_MCAST_CMD_DEL
:
2498 /* If there were no more bins to clear
2499 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2500 * clear any (0xff) bin.
2501 * See bnx2x_mcast_validate_e2() for explanation when it may
2504 bin
= bnx2x_mcast_clear_first_bin(o
);
2507 case BNX2X_MCAST_CMD_RESTORE
:
2508 bin
= cfg_data
->bin
;
2512 BNX2X_ERR("Unknown command: %d\n", cmd
);
2516 DP(BNX2X_MSG_SP
, "%s bin %d\n",
2517 ((rx_tx_add_flag
& ETH_MULTICAST_RULES_CMD_IS_ADD
) ?
2518 "Setting" : "Clearing"), bin
);
2520 data
->rules
[idx
].bin_id
= (u8
)bin
;
2521 data
->rules
[idx
].func_id
= func_id
;
2522 data
->rules
[idx
].engine_id
= o
->engine_id
;
2526 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2528 * @bp: device handle
2530 * @start_bin: index in the registry to start from (including)
2531 * @rdata_idx: index in the ramrod data to start from
2533 * returns last handled bin index or -1 if all bins have been handled
2535 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2536 struct bnx2x
*bp
, struct bnx2x_mcast_obj
*o
, int start_bin
,
2539 int cur_bin
, cnt
= *rdata_idx
;
2540 union bnx2x_mcast_config_data cfg_data
= {0};
2542 /* go through the registry and configure the bins from it */
2543 for (cur_bin
= bnx2x_mcast_get_next_bin(o
, start_bin
); cur_bin
>= 0;
2544 cur_bin
= bnx2x_mcast_get_next_bin(o
, cur_bin
+ 1)) {
2546 cfg_data
.bin
= (u8
)cur_bin
;
2547 o
->set_one_rule(bp
, o
, cnt
, &cfg_data
,
2548 BNX2X_MCAST_CMD_RESTORE
);
2552 DP(BNX2X_MSG_SP
, "About to configure a bin %d\n", cur_bin
);
2554 /* Break if we reached the maximum number
2557 if (cnt
>= o
->max_cmd_len
)
2566 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x
*bp
,
2567 struct bnx2x_mcast_obj
*o
, struct bnx2x_pending_mcast_cmd
*cmd_pos
,
2570 struct bnx2x_mcast_mac_elem
*pmac_pos
, *pmac_pos_n
;
2571 int cnt
= *line_idx
;
2572 union bnx2x_mcast_config_data cfg_data
= {0};
2574 list_for_each_entry_safe(pmac_pos
, pmac_pos_n
, &cmd_pos
->data
.macs_head
,
2577 cfg_data
.mac
= &pmac_pos
->mac
[0];
2578 o
->set_one_rule(bp
, o
, cnt
, &cfg_data
, cmd_pos
->type
);
2582 DP(BNX2X_MSG_SP
, "About to configure "BNX2X_MAC_FMT
2584 BNX2X_MAC_PRN_LIST(pmac_pos
->mac
));
2586 list_del(&pmac_pos
->link
);
2588 /* Break if we reached the maximum number
2591 if (cnt
>= o
->max_cmd_len
)
2597 /* if no more MACs to configure - we are done */
2598 if (list_empty(&cmd_pos
->data
.macs_head
))
2599 cmd_pos
->done
= true;
2602 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x
*bp
,
2603 struct bnx2x_mcast_obj
*o
, struct bnx2x_pending_mcast_cmd
*cmd_pos
,
2606 int cnt
= *line_idx
;
2608 while (cmd_pos
->data
.macs_num
) {
2609 o
->set_one_rule(bp
, o
, cnt
, NULL
, cmd_pos
->type
);
2613 cmd_pos
->data
.macs_num
--;
2615 DP(BNX2X_MSG_SP
, "Deleting MAC. %d left,cnt is %d\n",
2616 cmd_pos
->data
.macs_num
, cnt
);
2618 /* Break if we reached the maximum
2621 if (cnt
>= o
->max_cmd_len
)
2627 /* If we cleared all bins - we are done */
2628 if (!cmd_pos
->data
.macs_num
)
2629 cmd_pos
->done
= true;
2632 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x
*bp
,
2633 struct bnx2x_mcast_obj
*o
, struct bnx2x_pending_mcast_cmd
*cmd_pos
,
2636 cmd_pos
->data
.next_bin
= o
->hdl_restore(bp
, o
, cmd_pos
->data
.next_bin
,
2639 if (cmd_pos
->data
.next_bin
< 0)
2640 /* If o->set_restore returned -1 we are done */
2641 cmd_pos
->done
= true;
2643 /* Start from the next bin next time */
2644 cmd_pos
->data
.next_bin
++;
2647 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x
*bp
,
2648 struct bnx2x_mcast_ramrod_params
*p
)
2650 struct bnx2x_pending_mcast_cmd
*cmd_pos
, *cmd_pos_n
;
2652 struct bnx2x_mcast_obj
*o
= p
->mcast_obj
;
2654 list_for_each_entry_safe(cmd_pos
, cmd_pos_n
, &o
->pending_cmds_head
,
2656 switch (cmd_pos
->type
) {
2657 case BNX2X_MCAST_CMD_ADD
:
2658 bnx2x_mcast_hdl_pending_add_e2(bp
, o
, cmd_pos
, &cnt
);
2661 case BNX2X_MCAST_CMD_DEL
:
2662 bnx2x_mcast_hdl_pending_del_e2(bp
, o
, cmd_pos
, &cnt
);
2665 case BNX2X_MCAST_CMD_RESTORE
:
2666 bnx2x_mcast_hdl_pending_restore_e2(bp
, o
, cmd_pos
,
2671 BNX2X_ERR("Unknown command: %d\n", cmd_pos
->type
);
2675 /* If the command has been completed - remove it from the list
2676 * and free the memory
2678 if (cmd_pos
->done
) {
2679 list_del(&cmd_pos
->link
);
2683 /* Break if we reached the maximum number of rules */
2684 if (cnt
>= o
->max_cmd_len
)
2691 static inline void bnx2x_mcast_hdl_add(struct bnx2x
*bp
,
2692 struct bnx2x_mcast_obj
*o
, struct bnx2x_mcast_ramrod_params
*p
,
2695 struct bnx2x_mcast_list_elem
*mlist_pos
;
2696 union bnx2x_mcast_config_data cfg_data
= {0};
2697 int cnt
= *line_idx
;
2699 list_for_each_entry(mlist_pos
, &p
->mcast_list
, link
) {
2700 cfg_data
.mac
= mlist_pos
->mac
;
2701 o
->set_one_rule(bp
, o
, cnt
, &cfg_data
, BNX2X_MCAST_CMD_ADD
);
2705 DP(BNX2X_MSG_SP
, "About to configure "BNX2X_MAC_FMT
2707 BNX2X_MAC_PRN_LIST(mlist_pos
->mac
));
2713 static inline void bnx2x_mcast_hdl_del(struct bnx2x
*bp
,
2714 struct bnx2x_mcast_obj
*o
, struct bnx2x_mcast_ramrod_params
*p
,
2717 int cnt
= *line_idx
, i
;
2719 for (i
= 0; i
< p
->mcast_list_len
; i
++) {
2720 o
->set_one_rule(bp
, o
, cnt
, NULL
, BNX2X_MCAST_CMD_DEL
);
2724 DP(BNX2X_MSG_SP
, "Deleting MAC. %d left\n",
2725 p
->mcast_list_len
- i
- 1);
2732 * bnx2x_mcast_handle_current_cmd -
2734 * @bp: device handle
2737 * @start_cnt: first line in the ramrod data that may be used
2739 * This function is called iff there is enough place for the current command in
2741 * Returns number of lines filled in the ramrod data in total.
2743 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x
*bp
,
2744 struct bnx2x_mcast_ramrod_params
*p
, int cmd
,
2747 struct bnx2x_mcast_obj
*o
= p
->mcast_obj
;
2748 int cnt
= start_cnt
;
2750 DP(BNX2X_MSG_SP
, "p->mcast_list_len=%d\n", p
->mcast_list_len
);
2753 case BNX2X_MCAST_CMD_ADD
:
2754 bnx2x_mcast_hdl_add(bp
, o
, p
, &cnt
);
2757 case BNX2X_MCAST_CMD_DEL
:
2758 bnx2x_mcast_hdl_del(bp
, o
, p
, &cnt
);
2761 case BNX2X_MCAST_CMD_RESTORE
:
2762 o
->hdl_restore(bp
, o
, 0, &cnt
);
2766 BNX2X_ERR("Unknown command: %d\n", cmd
);
2770 /* The current command has been handled */
2771 p
->mcast_list_len
= 0;
2776 static int bnx2x_mcast_validate_e2(struct bnx2x
*bp
,
2777 struct bnx2x_mcast_ramrod_params
*p
,
2780 struct bnx2x_mcast_obj
*o
= p
->mcast_obj
;
2781 int reg_sz
= o
->get_registry_size(o
);
2784 /* DEL command deletes all currently configured MACs */
2785 case BNX2X_MCAST_CMD_DEL
:
2786 o
->set_registry_size(o
, 0);
2789 /* RESTORE command will restore the entire multicast configuration */
2790 case BNX2X_MCAST_CMD_RESTORE
:
2791 /* Here we set the approximate amount of work to do, which in
2792 * fact may be only less as some MACs in postponed ADD
2793 * command(s) scheduled before this command may fall into
2794 * the same bin and the actual number of bins set in the
2795 * registry would be less than we estimated here. See
2796 * bnx2x_mcast_set_one_rule_e2() for further details.
2798 p
->mcast_list_len
= reg_sz
;
2801 case BNX2X_MCAST_CMD_ADD
:
2802 case BNX2X_MCAST_CMD_CONT
:
2803 /* Here we assume that all new MACs will fall into new bins.
2804 * However we will correct the real registry size after we
2805 * handle all pending commands.
2807 o
->set_registry_size(o
, reg_sz
+ p
->mcast_list_len
);
2811 BNX2X_ERR("Unknown command: %d\n", cmd
);
2816 /* Increase the total number of MACs pending to be configured */
2817 o
->total_pending_num
+= p
->mcast_list_len
;
2822 static void bnx2x_mcast_revert_e2(struct bnx2x
*bp
,
2823 struct bnx2x_mcast_ramrod_params
*p
,
2826 struct bnx2x_mcast_obj
*o
= p
->mcast_obj
;
2828 o
->set_registry_size(o
, old_num_bins
);
2829 o
->total_pending_num
-= p
->mcast_list_len
;
2833 * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
2835 * @bp: device handle
2837 * @len: number of rules to handle
2839 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x
*bp
,
2840 struct bnx2x_mcast_ramrod_params
*p
,
2843 struct bnx2x_raw_obj
*r
= &p
->mcast_obj
->raw
;
2844 struct eth_multicast_rules_ramrod_data
*data
=
2845 (struct eth_multicast_rules_ramrod_data
*)(r
->rdata
);
2847 data
->header
.echo
= ((r
->cid
& BNX2X_SWCID_MASK
) |
2848 (BNX2X_FILTER_MCAST_PENDING
<< BNX2X_SWCID_SHIFT
));
2849 data
->header
.rule_cnt
= len
;
2853 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2855 * @bp: device handle
2858 * Recalculate the actual number of set bins in the registry using Brian
2859 * Kernighan's algorithm: it's execution complexity is as a number of set bins.
2861 * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
2863 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x
*bp
,
2864 struct bnx2x_mcast_obj
*o
)
2869 for (i
= 0; i
< BNX2X_MCAST_VEC_SZ
; i
++) {
2870 elem
= o
->registry
.aprox_match
.vec
[i
];
2875 o
->set_registry_size(o
, cnt
);
2880 static int bnx2x_mcast_setup_e2(struct bnx2x
*bp
,
2881 struct bnx2x_mcast_ramrod_params
*p
,
2884 struct bnx2x_raw_obj
*raw
= &p
->mcast_obj
->raw
;
2885 struct bnx2x_mcast_obj
*o
= p
->mcast_obj
;
2886 struct eth_multicast_rules_ramrod_data
*data
=
2887 (struct eth_multicast_rules_ramrod_data
*)(raw
->rdata
);
2890 /* Reset the ramrod data buffer */
2891 memset(data
, 0, sizeof(*data
));
2893 cnt
= bnx2x_mcast_handle_pending_cmds_e2(bp
, p
);
2895 /* If there are no more pending commands - clear SCHEDULED state */
2896 if (list_empty(&o
->pending_cmds_head
))
2899 /* The below may be true iff there was enough room in ramrod
2900 * data for all pending commands and for the current
2901 * command. Otherwise the current command would have been added
2902 * to the pending commands and p->mcast_list_len would have been
2905 if (p
->mcast_list_len
> 0)
2906 cnt
= bnx2x_mcast_handle_current_cmd(bp
, p
, cmd
, cnt
);
2908 /* We've pulled out some MACs - update the total number of
2911 o
->total_pending_num
-= cnt
;
2914 WARN_ON(o
->total_pending_num
< 0);
2915 WARN_ON(cnt
> o
->max_cmd_len
);
2917 bnx2x_mcast_set_rdata_hdr_e2(bp
, p
, (u8
)cnt
);
2919 /* Update a registry size if there are no more pending operations.
2921 * We don't want to change the value of the registry size if there are
2922 * pending operations because we want it to always be equal to the
2923 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
2924 * set bins after the last requested operation in order to properly
2925 * evaluate the size of the next DEL/RESTORE operation.
2927 * Note that we update the registry itself during command(s) handling
2928 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
2929 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2930 * with a limited amount of update commands (per MAC/bin) and we don't
2931 * know in this scope what the actual state of bins configuration is
2932 * going to be after this ramrod.
2934 if (!o
->total_pending_num
)
2935 bnx2x_mcast_refresh_registry_e2(bp
, o
);
2938 * If CLEAR_ONLY was requested - don't send a ramrod and clear
2939 * RAMROD_PENDING status immediately.
2941 if (test_bit(RAMROD_DRV_CLR_ONLY
, &p
->ramrod_flags
)) {
2942 raw
->clear_pending(raw
);
2946 * No need for an explicit memory barrier here as long we would
2947 * need to ensure the ordering of writing to the SPQ element
2948 * and updating of the SPQ producer which involves a memory
2949 * read and we will have to put a full memory barrier there
2950 * (inside bnx2x_sp_post()).
2954 rc
= bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_MULTICAST_RULES
,
2955 raw
->cid
, U64_HI(raw
->rdata_mapping
),
2956 U64_LO(raw
->rdata_mapping
),
2957 ETH_CONNECTION_TYPE
);
2961 /* Ramrod completion is pending */
2966 static int bnx2x_mcast_validate_e1h(struct bnx2x
*bp
,
2967 struct bnx2x_mcast_ramrod_params
*p
,
2970 /* Mark, that there is a work to do */
2971 if ((cmd
== BNX2X_MCAST_CMD_DEL
) || (cmd
== BNX2X_MCAST_CMD_RESTORE
))
2972 p
->mcast_list_len
= 1;
2977 static void bnx2x_mcast_revert_e1h(struct bnx2x
*bp
,
2978 struct bnx2x_mcast_ramrod_params
*p
,
2984 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
2986 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
2989 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x
*bp
,
2990 struct bnx2x_mcast_obj
*o
,
2991 struct bnx2x_mcast_ramrod_params
*p
,
2994 struct bnx2x_mcast_list_elem
*mlist_pos
;
2997 list_for_each_entry(mlist_pos
, &p
->mcast_list
, link
) {
2998 bit
= bnx2x_mcast_bin_from_mac(mlist_pos
->mac
);
2999 BNX2X_57711_SET_MC_FILTER(mc_filter
, bit
);
3001 DP(BNX2X_MSG_SP
, "About to configure "
3002 BNX2X_MAC_FMT
" mcast MAC, bin %d\n",
3003 BNX2X_MAC_PRN_LIST(mlist_pos
->mac
), bit
);
3005 /* bookkeeping... */
3006 BIT_VEC64_SET_BIT(o
->registry
.aprox_match
.vec
,
3011 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x
*bp
,
3012 struct bnx2x_mcast_obj
*o
, struct bnx2x_mcast_ramrod_params
*p
,
3017 for (bit
= bnx2x_mcast_get_next_bin(o
, 0);
3019 bit
= bnx2x_mcast_get_next_bin(o
, bit
+ 1)) {
3020 BNX2X_57711_SET_MC_FILTER(mc_filter
, bit
);
3021 DP(BNX2X_MSG_SP
, "About to set bin %d\n", bit
);
3025 /* On 57711 we write the multicast MACs' aproximate match
3026 * table by directly into the TSTORM's internal RAM. So we don't
3027 * really need to handle any tricks to make it work.
3029 static int bnx2x_mcast_setup_e1h(struct bnx2x
*bp
,
3030 struct bnx2x_mcast_ramrod_params
*p
,
3034 struct bnx2x_mcast_obj
*o
= p
->mcast_obj
;
3035 struct bnx2x_raw_obj
*r
= &o
->raw
;
3037 /* If CLEAR_ONLY has been requested - clear the registry
3038 * and clear a pending bit.
3040 if (!test_bit(RAMROD_DRV_CLR_ONLY
, &p
->ramrod_flags
)) {
3041 u32 mc_filter
[MC_HASH_SIZE
] = {0};
3043 /* Set the multicast filter bits before writing it into
3044 * the internal memory.
3047 case BNX2X_MCAST_CMD_ADD
:
3048 bnx2x_mcast_hdl_add_e1h(bp
, o
, p
, mc_filter
);
3051 case BNX2X_MCAST_CMD_DEL
:
3052 DP(BNX2X_MSG_SP
, "Invalidating multicast "
3053 "MACs configuration\n");
3055 /* clear the registry */
3056 memset(o
->registry
.aprox_match
.vec
, 0,
3057 sizeof(o
->registry
.aprox_match
.vec
));
3060 case BNX2X_MCAST_CMD_RESTORE
:
3061 bnx2x_mcast_hdl_restore_e1h(bp
, o
, p
, mc_filter
);
3065 BNX2X_ERR("Unknown command: %d\n", cmd
);
3069 /* Set the mcast filter in the internal memory */
3070 for (i
= 0; i
< MC_HASH_SIZE
; i
++)
3071 REG_WR(bp
, MC_HASH_OFFSET(bp
, i
), mc_filter
[i
]);
3073 /* clear the registry */
3074 memset(o
->registry
.aprox_match
.vec
, 0,
3075 sizeof(o
->registry
.aprox_match
.vec
));
3078 r
->clear_pending(r
);
3083 static int bnx2x_mcast_validate_e1(struct bnx2x
*bp
,
3084 struct bnx2x_mcast_ramrod_params
*p
,
3087 struct bnx2x_mcast_obj
*o
= p
->mcast_obj
;
3088 int reg_sz
= o
->get_registry_size(o
);
3091 /* DEL command deletes all currently configured MACs */
3092 case BNX2X_MCAST_CMD_DEL
:
3093 o
->set_registry_size(o
, 0);
3096 /* RESTORE command will restore the entire multicast configuration */
3097 case BNX2X_MCAST_CMD_RESTORE
:
3098 p
->mcast_list_len
= reg_sz
;
3099 DP(BNX2X_MSG_SP
, "Command %d, p->mcast_list_len=%d\n",
3100 cmd
, p
->mcast_list_len
);
3103 case BNX2X_MCAST_CMD_ADD
:
3104 case BNX2X_MCAST_CMD_CONT
:
3105 /* Multicast MACs on 57710 are configured as unicast MACs and
3106 * there is only a limited number of CAM entries for that
3109 if (p
->mcast_list_len
> o
->max_cmd_len
) {
3110 BNX2X_ERR("Can't configure more than %d multicast MACs"
3111 "on 57710\n", o
->max_cmd_len
);
3114 /* Every configured MAC should be cleared if DEL command is
3115 * called. Only the last ADD command is relevant as long as
3116 * every ADD commands overrides the previous configuration.
3118 DP(BNX2X_MSG_SP
, "p->mcast_list_len=%d\n", p
->mcast_list_len
);
3119 if (p
->mcast_list_len
> 0)
3120 o
->set_registry_size(o
, p
->mcast_list_len
);
3125 BNX2X_ERR("Unknown command: %d\n", cmd
);
3130 /* We want to ensure that commands are executed one by one for 57710.
3131 * Therefore each none-empty command will consume o->max_cmd_len.
3133 if (p
->mcast_list_len
)
3134 o
->total_pending_num
+= o
->max_cmd_len
;
3139 static void bnx2x_mcast_revert_e1(struct bnx2x
*bp
,
3140 struct bnx2x_mcast_ramrod_params
*p
,
3143 struct bnx2x_mcast_obj
*o
= p
->mcast_obj
;
3145 o
->set_registry_size(o
, old_num_macs
);
3147 /* If current command hasn't been handled yet and we are
3148 * here means that it's meant to be dropped and we have to
3149 * update the number of outstandling MACs accordingly.
3151 if (p
->mcast_list_len
)
3152 o
->total_pending_num
-= o
->max_cmd_len
;
3155 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x
*bp
,
3156 struct bnx2x_mcast_obj
*o
, int idx
,
3157 union bnx2x_mcast_config_data
*cfg_data
,
3160 struct bnx2x_raw_obj
*r
= &o
->raw
;
3161 struct mac_configuration_cmd
*data
=
3162 (struct mac_configuration_cmd
*)(r
->rdata
);
3165 if ((cmd
== BNX2X_MCAST_CMD_ADD
) || (cmd
== BNX2X_MCAST_CMD_RESTORE
)) {
3166 bnx2x_set_fw_mac_addr(&data
->config_table
[idx
].msb_mac_addr
,
3167 &data
->config_table
[idx
].middle_mac_addr
,
3168 &data
->config_table
[idx
].lsb_mac_addr
,
3171 data
->config_table
[idx
].vlan_id
= 0;
3172 data
->config_table
[idx
].pf_id
= r
->func_id
;
3173 data
->config_table
[idx
].clients_bit_vector
=
3174 cpu_to_le32(1 << r
->cl_id
);
3176 SET_FLAG(data
->config_table
[idx
].flags
,
3177 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
3178 T_ETH_MAC_COMMAND_SET
);
3183 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3185 * @bp: device handle
3187 * @len: number of rules to handle
3189 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x
*bp
,
3190 struct bnx2x_mcast_ramrod_params
*p
,
3193 struct bnx2x_raw_obj
*r
= &p
->mcast_obj
->raw
;
3194 struct mac_configuration_cmd
*data
=
3195 (struct mac_configuration_cmd
*)(r
->rdata
);
3197 u8 offset
= (CHIP_REV_IS_SLOW(bp
) ?
3198 BNX2X_MAX_EMUL_MULTI
*(1 + r
->func_id
) :
3199 BNX2X_MAX_MULTICAST
*(1 + r
->func_id
));
3201 data
->hdr
.offset
= offset
;
3202 data
->hdr
.client_id
= 0xff;
3203 data
->hdr
.echo
= ((r
->cid
& BNX2X_SWCID_MASK
) |
3204 (BNX2X_FILTER_MCAST_PENDING
<< BNX2X_SWCID_SHIFT
));
3205 data
->hdr
.length
= len
;
3209 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3211 * @bp: device handle
3213 * @start_idx: index in the registry to start from
3214 * @rdata_idx: index in the ramrod data to start from
3216 * restore command for 57710 is like all other commands - always a stand alone
3217 * command - start_idx and rdata_idx will always be 0. This function will always
3219 * returns -1 to comply with 57712 variant.
3221 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3222 struct bnx2x
*bp
, struct bnx2x_mcast_obj
*o
, int start_idx
,
3225 struct bnx2x_mcast_mac_elem
*elem
;
3227 union bnx2x_mcast_config_data cfg_data
= {0};
3229 /* go through the registry and configure the MACs from it. */
3230 list_for_each_entry(elem
, &o
->registry
.exact_match
.macs
, link
) {
3231 cfg_data
.mac
= &elem
->mac
[0];
3232 o
->set_one_rule(bp
, o
, i
, &cfg_data
, BNX2X_MCAST_CMD_RESTORE
);
3236 DP(BNX2X_MSG_SP
, "About to configure "BNX2X_MAC_FMT
3238 BNX2X_MAC_PRN_LIST(cfg_data
.mac
));
3247 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3248 struct bnx2x
*bp
, struct bnx2x_mcast_ramrod_params
*p
)
3250 struct bnx2x_pending_mcast_cmd
*cmd_pos
;
3251 struct bnx2x_mcast_mac_elem
*pmac_pos
;
3252 struct bnx2x_mcast_obj
*o
= p
->mcast_obj
;
3253 union bnx2x_mcast_config_data cfg_data
= {0};
3257 /* If nothing to be done - return */
3258 if (list_empty(&o
->pending_cmds_head
))
3261 /* Handle the first command */
3262 cmd_pos
= list_first_entry(&o
->pending_cmds_head
,
3263 struct bnx2x_pending_mcast_cmd
, link
);
3265 switch (cmd_pos
->type
) {
3266 case BNX2X_MCAST_CMD_ADD
:
3267 list_for_each_entry(pmac_pos
, &cmd_pos
->data
.macs_head
, link
) {
3268 cfg_data
.mac
= &pmac_pos
->mac
[0];
3269 o
->set_one_rule(bp
, o
, cnt
, &cfg_data
, cmd_pos
->type
);
3273 DP(BNX2X_MSG_SP
, "About to configure "BNX2X_MAC_FMT
3275 BNX2X_MAC_PRN_LIST(pmac_pos
->mac
));
3279 case BNX2X_MCAST_CMD_DEL
:
3280 cnt
= cmd_pos
->data
.macs_num
;
3281 DP(BNX2X_MSG_SP
, "About to delete %d multicast MACs\n", cnt
);
3284 case BNX2X_MCAST_CMD_RESTORE
:
3285 o
->hdl_restore(bp
, o
, 0, &cnt
);
3289 BNX2X_ERR("Unknown command: %d\n", cmd_pos
->type
);
3293 list_del(&cmd_pos
->link
);
3300 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3307 static inline void bnx2x_get_fw_mac_addr(__le16
*fw_hi
, __le16
*fw_mid
,
3308 __le16
*fw_lo
, u8
*mac
)
3310 mac
[1] = ((u8
*)fw_hi
)[0];
3311 mac
[0] = ((u8
*)fw_hi
)[1];
3312 mac
[3] = ((u8
*)fw_mid
)[0];
3313 mac
[2] = ((u8
*)fw_mid
)[1];
3314 mac
[5] = ((u8
*)fw_lo
)[0];
3315 mac
[4] = ((u8
*)fw_lo
)[1];
3319 * bnx2x_mcast_refresh_registry_e1 -
3321 * @bp: device handle
3324 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3325 * and update the registry correspondingly: if ADD - allocate a memory and add
3326 * the entries to the registry (list), if DELETE - clear the registry and free
3329 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x
*bp
,
3330 struct bnx2x_mcast_obj
*o
)
3332 struct bnx2x_raw_obj
*raw
= &o
->raw
;
3333 struct bnx2x_mcast_mac_elem
*elem
;
3334 struct mac_configuration_cmd
*data
=
3335 (struct mac_configuration_cmd
*)(raw
->rdata
);
3337 /* If first entry contains a SET bit - the command was ADD,
3338 * otherwise - DEL_ALL
3340 if (GET_FLAG(data
->config_table
[0].flags
,
3341 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
)) {
3342 int i
, len
= data
->hdr
.length
;
3344 /* Break if it was a RESTORE command */
3345 if (!list_empty(&o
->registry
.exact_match
.macs
))
3348 elem
= kzalloc(sizeof(*elem
)*len
, GFP_ATOMIC
);
3350 BNX2X_ERR("Failed to allocate registry memory\n");
3354 for (i
= 0; i
< len
; i
++, elem
++) {
3355 bnx2x_get_fw_mac_addr(
3356 &data
->config_table
[i
].msb_mac_addr
,
3357 &data
->config_table
[i
].middle_mac_addr
,
3358 &data
->config_table
[i
].lsb_mac_addr
,
3360 DP(BNX2X_MSG_SP
, "Adding registry entry for ["
3362 BNX2X_MAC_PRN_LIST(elem
->mac
));
3363 list_add_tail(&elem
->link
,
3364 &o
->registry
.exact_match
.macs
);
3367 elem
= list_first_entry(&o
->registry
.exact_match
.macs
,
3368 struct bnx2x_mcast_mac_elem
, link
);
3369 DP(BNX2X_MSG_SP
, "Deleting a registry\n");
3371 INIT_LIST_HEAD(&o
->registry
.exact_match
.macs
);
3377 static int bnx2x_mcast_setup_e1(struct bnx2x
*bp
,
3378 struct bnx2x_mcast_ramrod_params
*p
,
3381 struct bnx2x_mcast_obj
*o
= p
->mcast_obj
;
3382 struct bnx2x_raw_obj
*raw
= &o
->raw
;
3383 struct mac_configuration_cmd
*data
=
3384 (struct mac_configuration_cmd
*)(raw
->rdata
);
3387 /* Reset the ramrod data buffer */
3388 memset(data
, 0, sizeof(*data
));
3390 /* First set all entries as invalid */
3391 for (i
= 0; i
< o
->max_cmd_len
; i
++)
3392 SET_FLAG(data
->config_table
[i
].flags
,
3393 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
3394 T_ETH_MAC_COMMAND_INVALIDATE
);
3396 /* Handle pending commands first */
3397 cnt
= bnx2x_mcast_handle_pending_cmds_e1(bp
, p
);
3399 /* If there are no more pending commands - clear SCHEDULED state */
3400 if (list_empty(&o
->pending_cmds_head
))
3403 /* The below may be true iff there were no pending commands */
3405 cnt
= bnx2x_mcast_handle_current_cmd(bp
, p
, cmd
, 0);
3407 /* For 57710 every command has o->max_cmd_len length to ensure that
3408 * commands are done one at a time.
3410 o
->total_pending_num
-= o
->max_cmd_len
;
3414 WARN_ON(cnt
> o
->max_cmd_len
);
3416 /* Set ramrod header (in particular, a number of entries to update) */
3417 bnx2x_mcast_set_rdata_hdr_e1(bp
, p
, (u8
)cnt
);
3419 /* update a registry: we need the registry contents to be always up
3420 * to date in order to be able to execute a RESTORE opcode. Here
3421 * we use the fact that for 57710 we sent one command at a time
3422 * hence we may take the registry update out of the command handling
3423 * and do it in a simpler way here.
3425 rc
= bnx2x_mcast_refresh_registry_e1(bp
, o
);
3430 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3431 * RAMROD_PENDING status immediately.
3433 if (test_bit(RAMROD_DRV_CLR_ONLY
, &p
->ramrod_flags
)) {
3434 raw
->clear_pending(raw
);
3438 * No need for an explicit memory barrier here as long we would
3439 * need to ensure the ordering of writing to the SPQ element
3440 * and updating of the SPQ producer which involves a memory
3441 * read and we will have to put a full memory barrier there
3442 * (inside bnx2x_sp_post()).
3446 rc
= bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, raw
->cid
,
3447 U64_HI(raw
->rdata_mapping
),
3448 U64_LO(raw
->rdata_mapping
),
3449 ETH_CONNECTION_TYPE
);
3453 /* Ramrod completion is pending */
3459 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj
*o
)
3461 return o
->registry
.exact_match
.num_macs_set
;
3464 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj
*o
)
3466 return o
->registry
.aprox_match
.num_bins_set
;
3469 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj
*o
,
3472 o
->registry
.exact_match
.num_macs_set
= n
;
3475 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj
*o
,
3478 o
->registry
.aprox_match
.num_bins_set
= n
;
3481 int bnx2x_config_mcast(struct bnx2x
*bp
,
3482 struct bnx2x_mcast_ramrod_params
*p
,
3485 struct bnx2x_mcast_obj
*o
= p
->mcast_obj
;
3486 struct bnx2x_raw_obj
*r
= &o
->raw
;
3487 int rc
= 0, old_reg_size
;
3489 /* This is needed to recover number of currently configured mcast macs
3490 * in case of failure.
3492 old_reg_size
= o
->get_registry_size(o
);
3494 /* Do some calculations and checks */
3495 rc
= o
->validate(bp
, p
, cmd
);
3499 /* Return if there is no work to do */
3500 if ((!p
->mcast_list_len
) && (!o
->check_sched(o
)))
3503 DP(BNX2X_MSG_SP
, "o->total_pending_num=%d p->mcast_list_len=%d "
3504 "o->max_cmd_len=%d\n", o
->total_pending_num
,
3505 p
->mcast_list_len
, o
->max_cmd_len
);
3507 /* Enqueue the current command to the pending list if we can't complete
3508 * it in the current iteration
3510 if (r
->check_pending(r
) ||
3511 ((o
->max_cmd_len
> 0) && (o
->total_pending_num
> o
->max_cmd_len
))) {
3512 rc
= o
->enqueue_cmd(bp
, p
->mcast_obj
, p
, cmd
);
3516 /* As long as the current command is in a command list we
3517 * don't need to handle it separately.
3519 p
->mcast_list_len
= 0;
3522 if (!r
->check_pending(r
)) {
3524 /* Set 'pending' state */
3527 /* Configure the new classification in the chip */
3528 rc
= o
->config_mcast(bp
, p
, cmd
);
3532 /* Wait for a ramrod completion if was requested */
3533 if (test_bit(RAMROD_COMP_WAIT
, &p
->ramrod_flags
))
3534 rc
= o
->wait_comp(bp
, o
);
3540 r
->clear_pending(r
);
3543 o
->revert(bp
, p
, old_reg_size
);
3548 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj
*o
)
3550 smp_mb__before_clear_bit();
3551 clear_bit(o
->sched_state
, o
->raw
.pstate
);
3552 smp_mb__after_clear_bit();
3555 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj
*o
)
3557 smp_mb__before_clear_bit();
3558 set_bit(o
->sched_state
, o
->raw
.pstate
);
3559 smp_mb__after_clear_bit();
3562 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj
*o
)
3564 return !!test_bit(o
->sched_state
, o
->raw
.pstate
);
3567 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj
*o
)
3569 return o
->raw
.check_pending(&o
->raw
) || o
->check_sched(o
);
3572 void bnx2x_init_mcast_obj(struct bnx2x
*bp
,
3573 struct bnx2x_mcast_obj
*mcast_obj
,
3574 u8 mcast_cl_id
, u32 mcast_cid
, u8 func_id
,
3575 u8 engine_id
, void *rdata
, dma_addr_t rdata_mapping
,
3576 int state
, unsigned long *pstate
, bnx2x_obj_type type
)
3578 memset(mcast_obj
, 0, sizeof(*mcast_obj
));
3580 bnx2x_init_raw_obj(&mcast_obj
->raw
, mcast_cl_id
, mcast_cid
, func_id
,
3581 rdata
, rdata_mapping
, state
, pstate
, type
);
3583 mcast_obj
->engine_id
= engine_id
;
3585 INIT_LIST_HEAD(&mcast_obj
->pending_cmds_head
);
3587 mcast_obj
->sched_state
= BNX2X_FILTER_MCAST_SCHED
;
3588 mcast_obj
->check_sched
= bnx2x_mcast_check_sched
;
3589 mcast_obj
->set_sched
= bnx2x_mcast_set_sched
;
3590 mcast_obj
->clear_sched
= bnx2x_mcast_clear_sched
;
3592 if (CHIP_IS_E1(bp
)) {
3593 mcast_obj
->config_mcast
= bnx2x_mcast_setup_e1
;
3594 mcast_obj
->enqueue_cmd
= bnx2x_mcast_enqueue_cmd
;
3595 mcast_obj
->hdl_restore
=
3596 bnx2x_mcast_handle_restore_cmd_e1
;
3597 mcast_obj
->check_pending
= bnx2x_mcast_check_pending
;
3599 if (CHIP_REV_IS_SLOW(bp
))
3600 mcast_obj
->max_cmd_len
= BNX2X_MAX_EMUL_MULTI
;
3602 mcast_obj
->max_cmd_len
= BNX2X_MAX_MULTICAST
;
3604 mcast_obj
->wait_comp
= bnx2x_mcast_wait
;
3605 mcast_obj
->set_one_rule
= bnx2x_mcast_set_one_rule_e1
;
3606 mcast_obj
->validate
= bnx2x_mcast_validate_e1
;
3607 mcast_obj
->revert
= bnx2x_mcast_revert_e1
;
3608 mcast_obj
->get_registry_size
=
3609 bnx2x_mcast_get_registry_size_exact
;
3610 mcast_obj
->set_registry_size
=
3611 bnx2x_mcast_set_registry_size_exact
;
3613 /* 57710 is the only chip that uses the exact match for mcast
3616 INIT_LIST_HEAD(&mcast_obj
->registry
.exact_match
.macs
);
3618 } else if (CHIP_IS_E1H(bp
)) {
3619 mcast_obj
->config_mcast
= bnx2x_mcast_setup_e1h
;
3620 mcast_obj
->enqueue_cmd
= NULL
;
3621 mcast_obj
->hdl_restore
= NULL
;
3622 mcast_obj
->check_pending
= bnx2x_mcast_check_pending
;
3624 /* 57711 doesn't send a ramrod, so it has unlimited credit
3627 mcast_obj
->max_cmd_len
= -1;
3628 mcast_obj
->wait_comp
= bnx2x_mcast_wait
;
3629 mcast_obj
->set_one_rule
= NULL
;
3630 mcast_obj
->validate
= bnx2x_mcast_validate_e1h
;
3631 mcast_obj
->revert
= bnx2x_mcast_revert_e1h
;
3632 mcast_obj
->get_registry_size
=
3633 bnx2x_mcast_get_registry_size_aprox
;
3634 mcast_obj
->set_registry_size
=
3635 bnx2x_mcast_set_registry_size_aprox
;
3637 mcast_obj
->config_mcast
= bnx2x_mcast_setup_e2
;
3638 mcast_obj
->enqueue_cmd
= bnx2x_mcast_enqueue_cmd
;
3639 mcast_obj
->hdl_restore
=
3640 bnx2x_mcast_handle_restore_cmd_e2
;
3641 mcast_obj
->check_pending
= bnx2x_mcast_check_pending
;
3642 /* TODO: There should be a proper HSI define for this number!!!
3644 mcast_obj
->max_cmd_len
= 16;
3645 mcast_obj
->wait_comp
= bnx2x_mcast_wait
;
3646 mcast_obj
->set_one_rule
= bnx2x_mcast_set_one_rule_e2
;
3647 mcast_obj
->validate
= bnx2x_mcast_validate_e2
;
3648 mcast_obj
->revert
= bnx2x_mcast_revert_e2
;
3649 mcast_obj
->get_registry_size
=
3650 bnx2x_mcast_get_registry_size_aprox
;
3651 mcast_obj
->set_registry_size
=
3652 bnx2x_mcast_set_registry_size_aprox
;
3656 /*************************** Credit handling **********************************/
3659 * atomic_add_ifless - add if the result is less than a given value.
3661 * @v: pointer of type atomic_t
3662 * @a: the amount to add to v...
3663 * @u: ...if (v + a) is less than u.
3665 * returns true if (v + a) was less than u, and false otherwise.
3668 static inline bool __atomic_add_ifless(atomic_t
*v
, int a
, int u
)
3674 if (unlikely(c
+ a
>= u
))
3677 old
= atomic_cmpxchg((v
), c
, c
+ a
);
3678 if (likely(old
== c
))
3687 * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
3689 * @v: pointer of type atomic_t
3690 * @a: the amount to dec from v...
3691 * @u: ...if (v - a) is more or equal than u.
3693 * returns true if (v - a) was more or equal than u, and false
3696 static inline bool __atomic_dec_ifmoe(atomic_t
*v
, int a
, int u
)
3702 if (unlikely(c
- a
< u
))
3705 old
= atomic_cmpxchg((v
), c
, c
- a
);
3706 if (likely(old
== c
))
3714 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj
*o
, int cnt
)
3719 rc
= __atomic_dec_ifmoe(&o
->credit
, cnt
, 0);
3725 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj
*o
, int cnt
)
3731 /* Don't let to refill if credit + cnt > pool_sz */
3732 rc
= __atomic_add_ifless(&o
->credit
, cnt
, o
->pool_sz
+ 1);
3739 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj
*o
)
3744 cur_credit
= atomic_read(&o
->credit
);
3749 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj
*o
,
3756 static bool bnx2x_credit_pool_get_entry(
3757 struct bnx2x_credit_pool_obj
*o
,
3764 /* Find "internal cam-offset" then add to base for this object... */
3765 for (vec
= 0; vec
< BNX2X_POOL_VEC_SIZE
; vec
++) {
3767 /* Skip the current vector if there are no free entries in it */
3768 if (!o
->pool_mirror
[vec
])
3771 /* If we've got here we are going to find a free entry */
3772 for (idx
= vec
* BNX2X_POOL_VEC_SIZE
, i
= 0;
3773 i
< BIT_VEC64_ELEM_SZ
; idx
++, i
++)
3775 if (BIT_VEC64_TEST_BIT(o
->pool_mirror
, idx
)) {
3777 BIT_VEC64_CLEAR_BIT(o
->pool_mirror
, idx
);
3778 *offset
= o
->base_pool_offset
+ idx
;
3786 static bool bnx2x_credit_pool_put_entry(
3787 struct bnx2x_credit_pool_obj
*o
,
3790 if (offset
< o
->base_pool_offset
)
3793 offset
-= o
->base_pool_offset
;
3795 if (offset
>= o
->pool_sz
)
3798 /* Return the entry to the pool */
3799 BIT_VEC64_SET_BIT(o
->pool_mirror
, offset
);
3804 static bool bnx2x_credit_pool_put_entry_always_true(
3805 struct bnx2x_credit_pool_obj
*o
,
3811 static bool bnx2x_credit_pool_get_entry_always_true(
3812 struct bnx2x_credit_pool_obj
*o
,
3819 * bnx2x_init_credit_pool - initialize credit pool internals.
3822 * @base: Base entry in the CAM to use.
3823 * @credit: pool size.
3825 * If base is negative no CAM entries handling will be performed.
3826 * If credit is negative pool operations will always succeed (unlimited pool).
3829 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj
*p
,
3830 int base
, int credit
)
3832 /* Zero the object first */
3833 memset(p
, 0, sizeof(*p
));
3835 /* Set the table to all 1s */
3836 memset(&p
->pool_mirror
, 0xff, sizeof(p
->pool_mirror
));
3838 /* Init a pool as full */
3839 atomic_set(&p
->credit
, credit
);
3841 /* The total poll size */
3842 p
->pool_sz
= credit
;
3844 p
->base_pool_offset
= base
;
3846 /* Commit the change */
3849 p
->check
= bnx2x_credit_pool_check
;
3851 /* if pool credit is negative - disable the checks */
3853 p
->put
= bnx2x_credit_pool_put
;
3854 p
->get
= bnx2x_credit_pool_get
;
3855 p
->put_entry
= bnx2x_credit_pool_put_entry
;
3856 p
->get_entry
= bnx2x_credit_pool_get_entry
;
3858 p
->put
= bnx2x_credit_pool_always_true
;
3859 p
->get
= bnx2x_credit_pool_always_true
;
3860 p
->put_entry
= bnx2x_credit_pool_put_entry_always_true
;
3861 p
->get_entry
= bnx2x_credit_pool_get_entry_always_true
;
3864 /* If base is negative - disable entries handling */
3866 p
->put_entry
= bnx2x_credit_pool_put_entry_always_true
;
3867 p
->get_entry
= bnx2x_credit_pool_get_entry_always_true
;
3871 void bnx2x_init_mac_credit_pool(struct bnx2x
*bp
,
3872 struct bnx2x_credit_pool_obj
*p
, u8 func_id
,
3875 /* TODO: this will be defined in consts as well... */
3876 #define BNX2X_CAM_SIZE_EMUL 5
3880 if (CHIP_IS_E1(bp
)) {
3881 /* In E1, Multicast is saved in cam... */
3882 if (!CHIP_REV_IS_SLOW(bp
))
3883 cam_sz
= (MAX_MAC_CREDIT_E1
/ 2) - BNX2X_MAX_MULTICAST
;
3885 cam_sz
= BNX2X_CAM_SIZE_EMUL
- BNX2X_MAX_EMUL_MULTI
;
3887 bnx2x_init_credit_pool(p
, func_id
* cam_sz
, cam_sz
);
3889 } else if (CHIP_IS_E1H(bp
)) {
3890 /* CAM credit is equaly divided between all active functions
3893 if ((func_num
> 0)) {
3894 if (!CHIP_REV_IS_SLOW(bp
))
3895 cam_sz
= (MAX_MAC_CREDIT_E1H
/ (2*func_num
));
3897 cam_sz
= BNX2X_CAM_SIZE_EMUL
;
3898 bnx2x_init_credit_pool(p
, func_id
* cam_sz
, cam_sz
);
3900 /* this should never happen! Block MAC operations. */
3901 bnx2x_init_credit_pool(p
, 0, 0);
3907 * CAM credit is equaly divided between all active functions
3910 if ((func_num
> 0)) {
3911 if (!CHIP_REV_IS_SLOW(bp
))
3912 cam_sz
= (MAX_MAC_CREDIT_E2
/ func_num
);
3914 cam_sz
= BNX2X_CAM_SIZE_EMUL
;
3917 * No need for CAM entries handling for 57712 and
3920 bnx2x_init_credit_pool(p
, -1, cam_sz
);
3922 /* this should never happen! Block MAC operations. */
3923 bnx2x_init_credit_pool(p
, 0, 0);
3929 void bnx2x_init_vlan_credit_pool(struct bnx2x
*bp
,
3930 struct bnx2x_credit_pool_obj
*p
,
3934 if (CHIP_IS_E1x(bp
)) {
3936 * There is no VLAN credit in HW on 57710 and 57711 only
3937 * MAC / MAC-VLAN can be set
3939 bnx2x_init_credit_pool(p
, 0, -1);
3942 * CAM credit is equaly divided between all active functions
3946 int credit
= MAX_VLAN_CREDIT_E2
/ func_num
;
3947 bnx2x_init_credit_pool(p
, func_id
* credit
, credit
);
3949 /* this should never happen! Block VLAN operations. */
3950 bnx2x_init_credit_pool(p
, 0, 0);
3954 /****************** RSS Configuration ******************/
3956 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
3958 * @bp: driver hanlde
3959 * @p: pointer to rss configuration
3961 * Prints it when NETIF_MSG_IFUP debug level is configured.
3963 static inline void bnx2x_debug_print_ind_table(struct bnx2x
*bp
,
3964 struct bnx2x_config_rss_params
*p
)
3968 DP(BNX2X_MSG_SP
, "Setting indirection table to:\n");
3969 DP(BNX2X_MSG_SP
, "0x0000: ");
3970 for (i
= 0; i
< T_ETH_INDIRECTION_TABLE_SIZE
; i
++) {
3971 DP_CONT(BNX2X_MSG_SP
, "0x%02x ", p
->ind_table
[i
]);
3973 /* Print 4 bytes in a line */
3974 if ((i
+ 1 < T_ETH_INDIRECTION_TABLE_SIZE
) &&
3975 (((i
+ 1) & 0x3) == 0)) {
3976 DP_CONT(BNX2X_MSG_SP
, "\n");
3977 DP(BNX2X_MSG_SP
, "0x%04x: ", i
+ 1);
3981 DP_CONT(BNX2X_MSG_SP
, "\n");
3985 * bnx2x_setup_rss - configure RSS
3987 * @bp: device handle
3988 * @p: rss configuration
3990 * sends on UPDATE ramrod for that matter.
3992 static int bnx2x_setup_rss(struct bnx2x
*bp
,
3993 struct bnx2x_config_rss_params
*p
)
3995 struct bnx2x_rss_config_obj
*o
= p
->rss_obj
;
3996 struct bnx2x_raw_obj
*r
= &o
->raw
;
3997 struct eth_rss_update_ramrod_data
*data
=
3998 (struct eth_rss_update_ramrod_data
*)(r
->rdata
);
4002 memset(data
, 0, sizeof(*data
));
4004 DP(BNX2X_MSG_SP
, "Configuring RSS\n");
4006 /* Set an echo field */
4007 data
->echo
= (r
->cid
& BNX2X_SWCID_MASK
) |
4008 (r
->state
<< BNX2X_SWCID_SHIFT
);
4011 if (test_bit(BNX2X_RSS_MODE_DISABLED
, &p
->rss_flags
))
4012 rss_mode
= ETH_RSS_MODE_DISABLED
;
4013 else if (test_bit(BNX2X_RSS_MODE_REGULAR
, &p
->rss_flags
))
4014 rss_mode
= ETH_RSS_MODE_REGULAR
;
4015 else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI
, &p
->rss_flags
))
4016 rss_mode
= ETH_RSS_MODE_VLAN_PRI
;
4017 else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI
, &p
->rss_flags
))
4018 rss_mode
= ETH_RSS_MODE_E1HOV_PRI
;
4019 else if (test_bit(BNX2X_RSS_MODE_IP_DSCP
, &p
->rss_flags
))
4020 rss_mode
= ETH_RSS_MODE_IP_DSCP
;
4022 data
->rss_mode
= rss_mode
;
4024 DP(BNX2X_MSG_SP
, "rss_mode=%d\n", rss_mode
);
4026 /* RSS capabilities */
4027 if (test_bit(BNX2X_RSS_IPV4
, &p
->rss_flags
))
4028 data
->capabilities
|=
4029 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY
;
4031 if (test_bit(BNX2X_RSS_IPV4_TCP
, &p
->rss_flags
))
4032 data
->capabilities
|=
4033 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY
;
4035 if (test_bit(BNX2X_RSS_IPV6
, &p
->rss_flags
))
4036 data
->capabilities
|=
4037 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY
;
4039 if (test_bit(BNX2X_RSS_IPV6_TCP
, &p
->rss_flags
))
4040 data
->capabilities
|=
4041 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY
;
4044 data
->rss_result_mask
= p
->rss_result_mask
;
4047 data
->rss_engine_id
= o
->engine_id
;
4049 DP(BNX2X_MSG_SP
, "rss_engine_id=%d\n", data
->rss_engine_id
);
4051 /* Indirection table */
4052 memcpy(data
->indirection_table
, p
->ind_table
,
4053 T_ETH_INDIRECTION_TABLE_SIZE
);
4055 /* Remember the last configuration */
4056 memcpy(o
->ind_table
, p
->ind_table
, T_ETH_INDIRECTION_TABLE_SIZE
);
4058 /* Print the indirection table */
4059 if (netif_msg_ifup(bp
))
4060 bnx2x_debug_print_ind_table(bp
, p
);
4063 if (test_bit(BNX2X_RSS_SET_SRCH
, &p
->rss_flags
)) {
4064 memcpy(&data
->rss_key
[0], &p
->rss_key
[0],
4065 sizeof(data
->rss_key
));
4066 data
->capabilities
|= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY
;
4070 * No need for an explicit memory barrier here as long we would
4071 * need to ensure the ordering of writing to the SPQ element
4072 * and updating of the SPQ producer which involves a memory
4073 * read and we will have to put a full memory barrier there
4074 * (inside bnx2x_sp_post()).
4078 rc
= bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_RSS_UPDATE
, r
->cid
,
4079 U64_HI(r
->rdata_mapping
),
4080 U64_LO(r
->rdata_mapping
),
4081 ETH_CONNECTION_TYPE
);
4089 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj
*rss_obj
,
4092 memcpy(ind_table
, rss_obj
->ind_table
, sizeof(rss_obj
->ind_table
));
4095 int bnx2x_config_rss(struct bnx2x
*bp
,
4096 struct bnx2x_config_rss_params
*p
)
4099 struct bnx2x_rss_config_obj
*o
= p
->rss_obj
;
4100 struct bnx2x_raw_obj
*r
= &o
->raw
;
4102 /* Do nothing if only driver cleanup was requested */
4103 if (test_bit(RAMROD_DRV_CLR_ONLY
, &p
->ramrod_flags
))
4108 rc
= o
->config_rss(bp
, p
);
4110 r
->clear_pending(r
);
4114 if (test_bit(RAMROD_COMP_WAIT
, &p
->ramrod_flags
))
4115 rc
= r
->wait_comp(bp
, r
);
4121 void bnx2x_init_rss_config_obj(struct bnx2x
*bp
,
4122 struct bnx2x_rss_config_obj
*rss_obj
,
4123 u8 cl_id
, u32 cid
, u8 func_id
, u8 engine_id
,
4124 void *rdata
, dma_addr_t rdata_mapping
,
4125 int state
, unsigned long *pstate
,
4126 bnx2x_obj_type type
)
4128 bnx2x_init_raw_obj(&rss_obj
->raw
, cl_id
, cid
, func_id
, rdata
,
4129 rdata_mapping
, state
, pstate
, type
);
4131 rss_obj
->engine_id
= engine_id
;
4132 rss_obj
->config_rss
= bnx2x_setup_rss
;
4135 /********************** Queue state object ***********************************/
4138 * bnx2x_queue_state_change - perform Queue state change transition
4140 * @bp: device handle
4141 * @params: parameters to perform the transition
4143 * returns 0 in case of successfully completed transition, negative error
4144 * code in case of failure, positive (EBUSY) value if there is a completion
4145 * to that is still pending (possible only if RAMROD_COMP_WAIT is
4146 * not set in params->ramrod_flags for asynchronous commands).
4149 int bnx2x_queue_state_change(struct bnx2x
*bp
,
4150 struct bnx2x_queue_state_params
*params
)
4152 struct bnx2x_queue_sp_obj
*o
= params
->q_obj
;
4153 int rc
, pending_bit
;
4154 unsigned long *pending
= &o
->pending
;
4156 /* Check that the requested transition is legal */
4157 if (o
->check_transition(bp
, o
, params
))
4160 /* Set "pending" bit */
4161 pending_bit
= o
->set_pending(o
, params
);
4163 /* Don't send a command if only driver cleanup was requested */
4164 if (test_bit(RAMROD_DRV_CLR_ONLY
, ¶ms
->ramrod_flags
))
4165 o
->complete_cmd(bp
, o
, pending_bit
);
4168 rc
= o
->send_cmd(bp
, params
);
4170 o
->next_state
= BNX2X_Q_STATE_MAX
;
4171 clear_bit(pending_bit
, pending
);
4172 smp_mb__after_clear_bit();
4176 if (test_bit(RAMROD_COMP_WAIT
, ¶ms
->ramrod_flags
)) {
4177 rc
= o
->wait_comp(bp
, o
, pending_bit
);
4185 return !!test_bit(pending_bit
, pending
);
4189 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj
*obj
,
4190 struct bnx2x_queue_state_params
*params
)
4192 enum bnx2x_queue_cmd cmd
= params
->cmd
, bit
;
4194 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4197 if ((cmd
== BNX2X_Q_CMD_ACTIVATE
) ||
4198 (cmd
== BNX2X_Q_CMD_DEACTIVATE
))
4199 bit
= BNX2X_Q_CMD_UPDATE
;
4203 set_bit(bit
, &obj
->pending
);
4207 static int bnx2x_queue_wait_comp(struct bnx2x
*bp
,
4208 struct bnx2x_queue_sp_obj
*o
,
4209 enum bnx2x_queue_cmd cmd
)
4211 return bnx2x_state_wait(bp
, cmd
, &o
->pending
);
4215 * bnx2x_queue_comp_cmd - complete the state change command.
4217 * @bp: device handle
4221 * Checks that the arrived completion is expected.
4223 static int bnx2x_queue_comp_cmd(struct bnx2x
*bp
,
4224 struct bnx2x_queue_sp_obj
*o
,
4225 enum bnx2x_queue_cmd cmd
)
4227 unsigned long cur_pending
= o
->pending
;
4229 if (!test_and_clear_bit(cmd
, &cur_pending
)) {
4230 BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
4231 "pending 0x%lx, next_state %d\n", cmd
,
4232 o
->cids
[BNX2X_PRIMARY_CID_INDEX
],
4233 o
->state
, cur_pending
, o
->next_state
);
4237 if (o
->next_tx_only
>= o
->max_cos
)
4238 /* >= becuase tx only must always be smaller than cos since the
4239 * primary connection suports COS 0
4241 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4242 o
->next_tx_only
, o
->max_cos
);
4244 DP(BNX2X_MSG_SP
, "Completing command %d for queue %d, "
4245 "setting state to %d\n", cmd
,
4246 o
->cids
[BNX2X_PRIMARY_CID_INDEX
], o
->next_state
);
4248 if (o
->next_tx_only
) /* print num tx-only if any exist */
4249 DP(BNX2X_MSG_SP
, "primary cid %d: num tx-only cons %d",
4250 o
->cids
[BNX2X_PRIMARY_CID_INDEX
], o
->next_tx_only
);
4252 o
->state
= o
->next_state
;
4253 o
->num_tx_only
= o
->next_tx_only
;
4254 o
->next_state
= BNX2X_Q_STATE_MAX
;
4256 /* It's important that o->state and o->next_state are
4257 * updated before o->pending.
4261 clear_bit(cmd
, &o
->pending
);
4262 smp_mb__after_clear_bit();
4267 static void bnx2x_q_fill_setup_data_e2(struct bnx2x
*bp
,
4268 struct bnx2x_queue_state_params
*cmd_params
,
4269 struct client_init_ramrod_data
*data
)
4271 struct bnx2x_queue_setup_params
*params
= &cmd_params
->params
.setup
;
4275 /* IPv6 TPA supported for E2 and above only */
4276 data
->rx
.tpa_en
|= test_bit(BNX2X_Q_FLG_TPA_IPV6
, ¶ms
->flags
) *
4277 CLIENT_INIT_RX_DATA_TPA_EN_IPV6
;
4280 static void bnx2x_q_fill_init_general_data(struct bnx2x
*bp
,
4281 struct bnx2x_queue_sp_obj
*o
,
4282 struct bnx2x_general_setup_params
*params
,
4283 struct client_init_general_data
*gen_data
,
4284 unsigned long *flags
)
4286 gen_data
->client_id
= o
->cl_id
;
4288 if (test_bit(BNX2X_Q_FLG_STATS
, flags
)) {
4289 gen_data
->statistics_counter_id
=
4291 gen_data
->statistics_en_flg
= 1;
4292 gen_data
->statistics_zero_flg
=
4293 test_bit(BNX2X_Q_FLG_ZERO_STATS
, flags
);
4295 gen_data
->statistics_counter_id
=
4296 DISABLE_STATISTIC_COUNTER_ID_VALUE
;
4298 gen_data
->is_fcoe_flg
= test_bit(BNX2X_Q_FLG_FCOE
, flags
);
4299 gen_data
->activate_flg
= test_bit(BNX2X_Q_FLG_ACTIVE
, flags
);
4300 gen_data
->sp_client_id
= params
->spcl_id
;
4301 gen_data
->mtu
= cpu_to_le16(params
->mtu
);
4302 gen_data
->func_id
= o
->func_id
;
4305 gen_data
->cos
= params
->cos
;
4307 gen_data
->traffic_type
=
4308 test_bit(BNX2X_Q_FLG_FCOE
, flags
) ?
4309 LLFC_TRAFFIC_TYPE_FCOE
: LLFC_TRAFFIC_TYPE_NW
;
4311 DP(BNX2X_MSG_SP
, "flags: active %d, cos %d, stats en %d",
4312 gen_data
->activate_flg
, gen_data
->cos
, gen_data
->statistics_en_flg
);
4315 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj
*o
,
4316 struct bnx2x_txq_setup_params
*params
,
4317 struct client_init_tx_data
*tx_data
,
4318 unsigned long *flags
)
4320 tx_data
->enforce_security_flg
=
4321 test_bit(BNX2X_Q_FLG_TX_SEC
, flags
);
4322 tx_data
->default_vlan
=
4323 cpu_to_le16(params
->default_vlan
);
4324 tx_data
->default_vlan_flg
=
4325 test_bit(BNX2X_Q_FLG_DEF_VLAN
, flags
);
4326 tx_data
->tx_switching_flg
=
4327 test_bit(BNX2X_Q_FLG_TX_SWITCH
, flags
);
4328 tx_data
->anti_spoofing_flg
=
4329 test_bit(BNX2X_Q_FLG_ANTI_SPOOF
, flags
);
4330 tx_data
->tx_status_block_id
= params
->fw_sb_id
;
4331 tx_data
->tx_sb_index_number
= params
->sb_cq_index
;
4332 tx_data
->tss_leading_client_id
= params
->tss_leading_cl_id
;
4334 tx_data
->tx_bd_page_base
.lo
=
4335 cpu_to_le32(U64_LO(params
->dscr_map
));
4336 tx_data
->tx_bd_page_base
.hi
=
4337 cpu_to_le32(U64_HI(params
->dscr_map
));
4339 /* Don't configure any Tx switching mode during queue SETUP */
4343 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj
*o
,
4344 struct rxq_pause_params
*params
,
4345 struct client_init_rx_data
*rx_data
)
4347 /* flow control data */
4348 rx_data
->cqe_pause_thr_low
= cpu_to_le16(params
->rcq_th_lo
);
4349 rx_data
->cqe_pause_thr_high
= cpu_to_le16(params
->rcq_th_hi
);
4350 rx_data
->bd_pause_thr_low
= cpu_to_le16(params
->bd_th_lo
);
4351 rx_data
->bd_pause_thr_high
= cpu_to_le16(params
->bd_th_hi
);
4352 rx_data
->sge_pause_thr_low
= cpu_to_le16(params
->sge_th_lo
);
4353 rx_data
->sge_pause_thr_high
= cpu_to_le16(params
->sge_th_hi
);
4354 rx_data
->rx_cos_mask
= cpu_to_le16(params
->pri_map
);
static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_rxq_setup_params *params,
				struct client_init_rx_data *rx_data,
				unsigned long *flags)
{
	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
	rx_data->vmqueue_mode_en_flg = 0;

	rx_data->cache_line_alignment_log_size =
		params->cache_line_log;
	rx_data->enable_dynamic_hc =
		test_bit(BNX2X_Q_FLG_DHC, flags);
	rx_data->max_sges_for_packet = params->max_sges_pkt;
	rx_data->client_qzone_id = params->cl_qzone_id;
	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);

	/* Always start in DROP_ALL mode */
	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);

	/* We don't set drop flags */
	rx_data->drop_ip_cs_err_flg = 0;
	rx_data->drop_tcp_cs_err_flg = 0;
	rx_data->drop_ttl0_flg = 0;
	rx_data->drop_udp_cs_err_flg = 0;
	rx_data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_VLAN, flags);
	rx_data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_OV, flags);
	rx_data->status_block_id = params->fw_sb_id;
	rx_data->rx_sb_index_number = params->sb_cq_index;
	rx_data->max_tpa_queues = params->max_tpa_queues;
	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
	rx_data->bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	rx_data->bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));
	rx_data->sge_page_base.lo =
		cpu_to_le32(U64_LO(params->sge_map));
	rx_data->sge_page_base.hi =
		cpu_to_le32(U64_HI(params->sge_map));
	rx_data->cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rcq_map));
	rx_data->cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rcq_map));
	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);

	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
		rx_data->approx_mcast_engine_id = o->func_id;
		rx_data->is_approx_mcast = 1;
	}

	rx_data->rss_engine_id = params->rss_engine_id;

	/* silent vlan removal */
	rx_data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
	rx_data->silent_vlan_value =
		cpu_to_le16(params->silent_removal_value);
	rx_data->silent_vlan_mask =
		cpu_to_le16(params->silent_removal_mask);
}
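
/* The queue is thus created with all unicast and multicast traffic dropped;
 * the actual Rx filtering behaviour is programmed afterwards through the
 * separate rx_mode machinery defined earlier in this file.
 */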
/* initialize the general, tx and rx parts of a queue object */
static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
				struct bnx2x_queue_state_params *cmd_params,
				struct client_init_ramrod_data *data)
{
	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
				       &cmd_params->params.setup.gen_params,
				       &data->general,
				       &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
				  &cmd_params->params.setup.txq_params,
				  &data->tx,
				  &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
				  &cmd_params->params.setup.rxq_params,
				  &data->rx,
				  &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
				     &cmd_params->params.setup.pause_params,
				     &data->rx);
}
/* initialize the general and tx parts of a tx-only queue object */
static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
				struct bnx2x_queue_state_params *cmd_params,
				struct tx_queue_init_ramrod_data *data)
{
	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
				       &cmd_params->params.tx_only.gen_params,
				       &data->general,
				       &cmd_params->params.tx_only.flags);

	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
				  &cmd_params->params.tx_only.txq_params,
				  &data->tx,
				  &cmd_params->params.tx_only.flags);

	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",
	   cmd_params->q_obj->cids[0],
	   data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
}
/**
 * bnx2x_q_init - init HW/FW queue
 *
 * @bp:		device handle
 * @params:	queue state parameters
 *
 * HW/FW initial Queue configuration:
 *	- HC: Rx and Tx
 *	- CDU context validation
 */
static inline int bnx2x_q_init(struct bnx2x *bp,
			       struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct bnx2x_queue_init_params *init = &params->params.init;
	u16 hc_usec;
	u8 cos;

	/* Tx HC configuration */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;

		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
			init->tx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
			hc_usec);
	}

	/* Rx HC configuration */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;

		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
			init->rx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
			hc_usec);
	}

	/* Set CDU context validation values */
	for (cos = 0; cos < o->max_cos; cos++) {
		DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
				 o->cids[cos], cos);
		DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
		bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
	}

	/* As no ramrod is sent, complete the command immediately */
	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);

	mmiowb();
	smp_mb();

	return 0;
}
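
/* The hc_usec computation above converts the requested interrupt rate into
 * the microsecond period the status block coalescing logic works with. A
 * worked example: hc_rate = 50000 interrupts/sec gives
 * 1000000 / 50000 = 20 usec between status block updates, while hc_rate = 0
 * is passed through as 0 usec (no coalescing timeout).
 */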
static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_init_ramrod_data *rdata =
		(struct client_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);

	/* No need for an explicit memory barrier here: ordering between
	 * writing the SPQ element and updating the SPQ producer (which
	 * involves a memory read) is enforced by the full memory barrier
	 * inside bnx2x_sp_post().
	 */
	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}
static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_init_ramrod_data *rdata =
		(struct client_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
	bnx2x_q_fill_setup_data_e2(bp, params, rdata);

	/* No need for an explicit memory barrier here: ordering between
	 * writing the SPQ element and updating the SPQ producer (which
	 * involves a memory read) is enforced by the full memory barrier
	 * inside bnx2x_sp_post().
	 */
	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}
static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct tx_queue_init_ramrod_data *rdata =
		(struct tx_queue_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
		&params->params.tx_only;
	u8 cid_index = tx_only_params->cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
	   tx_only_params->gen_params.cos,
	   tx_only_params->gen_params.spcl_id);

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_tx_only(bp, params, rdata);

	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, "
			 "sp-client id %d, cos %d\n",
	   o->cids[cid_index],
	   rdata->general.client_id,
	   rdata->general.sp_client_id, rdata->general.cos);

	/* No need for an explicit memory barrier here: ordering between
	 * writing the SPQ element and updating the SPQ producer (which
	 * involves a memory read) is enforced by the full memory barrier
	 * inside bnx2x_sp_post().
	 */
	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}
static void bnx2x_q_fill_update_data(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *obj,
				struct bnx2x_queue_update_params *params,
				struct client_update_ramrod_data *data)
{
	/* Client ID of the client to update */
	data->client_id = obj->cl_id;

	/* Function ID of the client to update */
	data->func_id = obj->func_id;

	/* Default VLAN value */
	data->default_vlan = cpu_to_le16(params->def_vlan);

	/* Inner VLAN stripping */
	data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
	data->inner_vlan_removal_change_flg =
		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
			 &params->update_flags);

	/* Outer VLAN stripping */
	data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
	data->outer_vlan_removal_change_flg =
		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
			 &params->update_flags);

	/* Drop packets that have a source MAC that doesn't belong to this
	 * Queue.
	 */
	data->anti_spoofing_enable_flg =
		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
	data->anti_spoofing_change_flg =
		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);

	/* Activate/Deactivate */
	data->activate_flg =
		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
	data->activate_change_flg =
		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);

	/* Enable default VLAN */
	data->default_vlan_enable_flg =
		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
	data->default_vlan_change_flg =
		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			 &params->update_flags);

	/* silent vlan removal */
	data->silent_vlan_change_flg =
		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			 &params->update_flags);
	data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
}
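
/* Note the pairing above: every updatable attribute has a value flag and a
 * matching *_change_flg. The firmware only applies an attribute whose change
 * flag is set, so a single UPDATE ramrod can touch one attribute and leave
 * the rest untouched. For example, flipping only the "active" state means
 * setting BNX2X_Q_UPDATE_ACTIVATE_CHNG and then setting or clearing
 * BNX2X_Q_UPDATE_ACTIVATE, exactly as bnx2x_q_send_activate() and
 * bnx2x_q_send_deactivate() below do.
 */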
static inline int bnx2x_q_send_update(struct bnx2x *bp,
				      struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_update_ramrod_data *rdata =
		(struct client_update_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_queue_update_params *update_params =
		&params->params.update;
	u8 cid_index = update_params->cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_update_data(bp, o, update_params, rdata);

	/* No need for an explicit memory barrier here: ordering between
	 * writing the SPQ element and updating the SPQ producer (which
	 * involves a memory read) is enforced by the full memory barrier
	 * inside bnx2x_sp_post().
	 */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
			     o->cids[cid_index], U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}
/**
 * bnx2x_q_send_deactivate - send DEACTIVATE command
 *
 * @bp:		device handle
 * @params:	queue state parameters
 *
 * Implemented using the UPDATE command.
 */
static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_update_params *update = &params->params.update;

	memset(update, 0, sizeof(*update));

	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);

	return bnx2x_q_send_update(bp, params);
}
/**
 * bnx2x_q_send_activate - send ACTIVATE command
 *
 * @bp:		device handle
 * @params:	queue state parameters
 *
 * Implemented using the UPDATE command.
 */
static inline int bnx2x_q_send_activate(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_update_params *update = &params->params.update;

	memset(update, 0, sizeof(*update));

	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);

	return bnx2x_q_send_update(bp, params);
}
static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	/* TODO: Not implemented yet. */
	return -1;
}
static inline int bnx2x_q_send_halt(struct bnx2x *bp,
				    struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
			     ETH_CONNECTION_TYPE);
}
static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
				       struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	u8 cid_idx = params->params.cfc_del.cid_index;

	if (cid_idx >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_idx);
		return -EINVAL;
	}

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
			     o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
}
static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	u8 cid_index = params->params.terminate.cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
			     o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
}
static inline int bnx2x_q_send_empty(struct bnx2x *bp,
				     struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
			     ETH_CONNECTION_TYPE);
}
static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_INIT:
		return bnx2x_q_init(bp, params);
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
		return bnx2x_q_send_setup_tx_only(bp, params);
	case BNX2X_Q_CMD_DEACTIVATE:
		return bnx2x_q_send_deactivate(bp, params);
	case BNX2X_Q_CMD_ACTIVATE:
		return bnx2x_q_send_activate(bp, params);
	case BNX2X_Q_CMD_UPDATE:
		return bnx2x_q_send_update(bp, params);
	case BNX2X_Q_CMD_UPDATE_TPA:
		return bnx2x_q_send_update_tpa(bp, params);
	case BNX2X_Q_CMD_HALT:
		return bnx2x_q_send_halt(bp, params);
	case BNX2X_Q_CMD_CFC_DEL:
		return bnx2x_q_send_cfc_del(bp, params);
	case BNX2X_Q_CMD_TERMINATE:
		return bnx2x_q_send_terminate(bp, params);
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_q_send_empty(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
				    struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e1x(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
				   struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e2(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
/**
 * bnx2x_queue_chk_transition - check state machine of a regular Queue
 *
 * @bp:		device handle
 * @o:		queue state object
 * @params:	queue state parameters
 *
 * It both checks if the requested command is legal in the current state
 * and, if it is, sets `next_state' in the object; the completion flow
 * will later use it to set the `state' of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         -EINVAL otherwise.
 */
static int bnx2x_queue_chk_transition(struct bnx2x *bp,
				      struct bnx2x_queue_sp_obj *o,
				      struct bnx2x_queue_state_params *params)
{
	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
	enum bnx2x_queue_cmd cmd = params->cmd;
	struct bnx2x_queue_update_params *update_params =
		&params->params.update;
	u8 next_tx_only = o->num_tx_only;

	/*
	 * Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = BNX2X_Q_STATE_MAX;
	}

	/*
	 * Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending)
		return -EBUSY;

	switch (state) {
	case BNX2X_Q_STATE_RESET:
		if (cmd == BNX2X_Q_CMD_INIT)
			next_state = BNX2X_Q_STATE_INITIALIZED;

		break;
	case BNX2X_Q_STATE_INITIALIZED:
		if (cmd == BNX2X_Q_CMD_SETUP) {
			if (test_bit(BNX2X_Q_FLG_ACTIVE,
				     &params->params.setup.flags))
				next_state = BNX2X_Q_STATE_ACTIVE;
			else
				next_state = BNX2X_Q_STATE_INACTIVE;
		}

		break;
	case BNX2X_Q_STATE_ACTIVE:
		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
			next_state = BNX2X_Q_STATE_INACTIVE;

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_ACTIVE;

		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
			next_state = BNX2X_Q_STATE_MULTI_COS;
			next_tx_only = 1;
		}

		else if (cmd == BNX2X_Q_CMD_HALT)
			next_state = BNX2X_Q_STATE_STOPPED;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				      &update_params->update_flags))
				next_state = BNX2X_Q_STATE_INACTIVE;
			else
				next_state = BNX2X_Q_STATE_ACTIVE;
		}

		break;
	case BNX2X_Q_STATE_MULTI_COS:
		if (cmd == BNX2X_Q_CMD_TERMINATE)
			next_state = BNX2X_Q_STATE_MCOS_TERMINATED;

		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
			next_state = BNX2X_Q_STATE_MULTI_COS;
			next_tx_only = o->num_tx_only + 1;
		}

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_MULTI_COS;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				      &update_params->update_flags))
				next_state = BNX2X_Q_STATE_INACTIVE;
			else
				next_state = BNX2X_Q_STATE_MULTI_COS;
		}

		break;
	case BNX2X_Q_STATE_MCOS_TERMINATED:
		if (cmd == BNX2X_Q_CMD_CFC_DEL) {
			next_tx_only = o->num_tx_only - 1;
			if (next_tx_only == 0)
				next_state = BNX2X_Q_STATE_ACTIVE;
			else
				next_state = BNX2X_Q_STATE_MULTI_COS;
		}

		break;
	case BNX2X_Q_STATE_INACTIVE:
		if (cmd == BNX2X_Q_CMD_ACTIVATE)
			next_state = BNX2X_Q_STATE_ACTIVE;

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_INACTIVE;

		else if (cmd == BNX2X_Q_CMD_HALT)
			next_state = BNX2X_Q_STATE_STOPPED;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				     &update_params->update_flags)) {
				if (o->num_tx_only == 0)
					next_state = BNX2X_Q_STATE_ACTIVE;
				else /* tx only queues exist for this queue */
					next_state = BNX2X_Q_STATE_MULTI_COS;
			} else
				next_state = BNX2X_Q_STATE_INACTIVE;
		}

		break;
	case BNX2X_Q_STATE_STOPPED:
		if (cmd == BNX2X_Q_CMD_TERMINATE)
			next_state = BNX2X_Q_STATE_TERMINATED;

		break;
	case BNX2X_Q_STATE_TERMINATED:
		if (cmd == BNX2X_Q_CMD_CFC_DEL)
			next_state = BNX2X_Q_STATE_RESET;

		break;
	default:
		BNX2X_ERR("Illegal state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != BNX2X_Q_STATE_MAX) {
		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
				 state, cmd, next_state);
		o->next_state = next_state;
		o->next_tx_only = next_tx_only;
		return 0;
	}

	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);

	return -EINVAL;
}
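
/* A compressed view of the regular-queue state machine implemented above
 * (derived directly from the switch; UPDATE/EMPTY/UPDATE_TPA self-loops
 * omitted):
 *
 *	RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *	ACTIVE <--ACTIVATE/DEACTIVATE--> INACTIVE
 *	ACTIVE --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
 *	MCOS_TERMINATED --CFC_DEL--> ACTIVE or MULTI_COS (per num_tx_only)
 *	ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *	TERMINATED --CFC_DEL--> RESET
 */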
void bnx2x_init_queue_obj(struct bnx2x *bp,
			  struct bnx2x_queue_sp_obj *obj,
			  u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
			  void *rdata,
			  dma_addr_t rdata_mapping, unsigned long type)
{
	memset(obj, 0, sizeof(*obj));

	/* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
	BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);

	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
	obj->max_cos = cid_cnt;
	obj->cl_id = cl_id;
	obj->func_id = func_id;
	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;
	obj->type = type;
	obj->next_state = BNX2X_Q_STATE_MAX;

	if (CHIP_IS_E1x(bp))
		obj->send_cmd = bnx2x_queue_send_cmd_e1x;
	else
		obj->send_cmd = bnx2x_queue_send_cmd_e2;

	obj->check_transition = bnx2x_queue_chk_transition;

	obj->complete_cmd = bnx2x_queue_comp_cmd;
	obj->wait_comp = bnx2x_queue_wait_comp;
	obj->set_pending = bnx2x_queue_set_pending;
}
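
/* Illustrative usage sketch (hypothetical caller, not a call site in this
 * file): once the object is initialized, a caller drives the queue through
 * its states with bnx2x_queue_state_change(), defined earlier in this file:
 *
 *	struct bnx2x_queue_state_params q_params;
 *
 *	memset(&q_params, 0, sizeof(q_params));
 *	q_params.q_obj = q_obj;
 *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *
 *	q_params.cmd = BNX2X_Q_CMD_INIT;
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 *
 *	q_params.cmd = BNX2X_Q_CMD_SETUP;
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 *
 * where q_params.params.init and q_params.params.setup are assumed to be
 * filled by the caller before the respective command.
 */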
void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
			     struct bnx2x_queue_sp_obj *obj,
			     u32 cid, u8 index)
{
	obj->cids[index] = cid;
}
/********************** Function state object *********************************/
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
					   struct bnx2x_func_sp_obj *o)
{
	/* in the middle of transaction - return INVALID state */
	if (o->pending)
		return BNX2X_F_STATE_MAX;

	/*
	 * Ensure the order of reading of o->pending and o->state:
	 * o->pending should be read first.
	 */
	rmb();
	return o->state;
}
static int bnx2x_func_wait_comp(struct bnx2x *bp,
				struct bnx2x_func_sp_obj *o,
				enum bnx2x_func_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}
/**
 * bnx2x_func_state_change_comp - complete the state machine transition
 *
 * @bp:		device handle
 * @o:		function state object
 * @cmd:	command that has completed
 *
 * Called on state change transition. Completes the state
 * machine transition only - no HW interaction.
 */
static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
					       struct bnx2x_func_sp_obj *o,
					       enum bnx2x_func_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for func %d in state %d "
			  "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
			  o->state, cur_pending, o->next_state);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to "
			 "%d\n", cmd, BP_FUNC(bp), o->next_state);

	o->state = o->next_state;
	o->next_state = BNX2X_F_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}
/**
 * bnx2x_func_comp_cmd - complete the state change command
 *
 * @bp:		device handle
 * @o:		function state object
 * @cmd:	command to complete
 *
 * Checks that the arrived completion is expected.
 */
static int bnx2x_func_comp_cmd(struct bnx2x *bp,
			       struct bnx2x_func_sp_obj *o,
			       enum bnx2x_func_cmd cmd)
{
	/* Complete the state machine part first, check if it's a
	 * legal completion.
	 */
	int rc = bnx2x_func_state_change_comp(bp, o, cmd);
	return rc;
}
/**
 * bnx2x_func_chk_transition - perform function state machine transition
 *
 * @bp:		device handle
 * @o:		function state object
 * @params:	function state parameters
 *
 * It both checks if the requested command is legal in the current state
 * and, if it is, sets `next_state' in the object; the completion flow
 * will later use it to set the `state' of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         -EINVAL otherwise.
 */
static int bnx2x_func_chk_transition(struct bnx2x *bp,
				     struct bnx2x_func_sp_obj *o,
				     struct bnx2x_func_state_params *params)
{
	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
	enum bnx2x_func_cmd cmd = params->cmd;

	/*
	 * Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = BNX2X_F_STATE_MAX;
	}

	/*
	 * Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending)
		return -EBUSY;

	switch (state) {
	case BNX2X_F_STATE_RESET:
		if (cmd == BNX2X_F_CMD_HW_INIT)
			next_state = BNX2X_F_STATE_INITIALIZED;

		break;
	case BNX2X_F_STATE_INITIALIZED:
		if (cmd == BNX2X_F_CMD_START)
			next_state = BNX2X_F_STATE_STARTED;

		else if (cmd == BNX2X_F_CMD_HW_RESET)
			next_state = BNX2X_F_STATE_RESET;

		break;
	case BNX2X_F_STATE_STARTED:
		if (cmd == BNX2X_F_CMD_STOP)
			next_state = BNX2X_F_STATE_INITIALIZED;
		else if (cmd == BNX2X_F_CMD_TX_STOP)
			next_state = BNX2X_F_STATE_TX_STOPPED;

		break;
	case BNX2X_F_STATE_TX_STOPPED:
		if (cmd == BNX2X_F_CMD_TX_START)
			next_state = BNX2X_F_STATE_STARTED;

		break;
	default:
		BNX2X_ERR("Unknown state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != BNX2X_F_STATE_MAX) {
		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
				 state, cmd, next_state);
		o->next_state = next_state;
		return 0;
	}

	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
	   state, cmd);

	return -EINVAL;
}
/**
 * bnx2x_func_init_func - performs HW init at function stage
 *
 * @bp:		device handle
 * @drv:	driver specific operations
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
 * HW blocks.
 */
static inline int bnx2x_func_init_func(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	return drv->init_hw_func(bp);
}
/**
 * bnx2x_func_init_port - performs HW init at port stage
 *
 * @bp:		device handle
 * @drv:	driver specific operations
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
 * FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_port(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_port(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_func(bp, drv);
}
/**
 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
 *
 * @bp:		device handle
 * @drv:	driver specific operations
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_cmn_chip(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_port(bp, drv);
}
/**
 * bnx2x_func_init_cmn - performs HW init at common stage
 *
 * @bp:		device handle
 * @drv:	driver specific operations
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
				      const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_cmn(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_port(bp, drv);
}
static int bnx2x_func_hw_init(struct bnx2x *bp,
			      struct bnx2x_func_state_params *params)
{
	u32 load_code = params->params.hw_init.load_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
	int rc = 0;

	DP(BNX2X_MSG_SP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	/* Prepare buffers for unzipping the FW */
	rc = drv->gunzip_init(bp);
	if (rc)
		return rc;

	/* Prepare FW */
	rc = drv->init_fw(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		goto fw_init_err;
	}

	/* Handle the beginning of COMMON_XXX phases separately... */
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_func_init_cmn_chip(bp, drv);
		if (rc)
			goto init_hw_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_func_init_cmn(bp, drv);
		if (rc)
			goto init_hw_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_func_init_port(bp, drv);
		if (rc)
			goto init_hw_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_func_init_func(bp, drv);
		if (rc)
			goto init_hw_err;

		break;
	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		rc = -EINVAL;
	}

init_hw_err:
	drv->release_fw(bp);

fw_init_err:
	drv->gunzip_end(bp);

	/* In case of success, complete the command immediately: no ramrods
	 * have been sent.
	 */
	if (!rc)
		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);

	return rc;
}
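
/* The COMMON_XXX handlers cascade, so a single load_phase value initializes
 * every narrower scope as well (a summary of the chain, derived from the
 * functions above):
 *
 *	LOAD_COMMON_CHIP -> init_hw_cmn_chip, then the LOAD_PORT path
 *	LOAD_COMMON      -> init_hw_cmn,      then the LOAD_PORT path
 *	LOAD_PORT        -> init_hw_port,     then the LOAD_FUNCTION path
 *	LOAD_FUNCTION    -> init_hw_func only
 */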
/**
 * bnx2x_func_reset_func - reset HW at function stage
 *
 * @bp:		device handle
 * @drv:	driver specific operations
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
 * FUNCTION-only HW blocks.
 */
static inline void bnx2x_func_reset_func(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_func(bp);
}
/**
 * bnx2x_func_reset_port - reset HW at port stage
 *
 * @bp:		device handle
 * @drv:	driver specific operations
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
 * FUNCTION-only and PORT-only HW blocks.
 *
 * It's important to call reset_port before reset_func(): the last thing
 * reset_func() does is pf_disable(), which disables PGLUE_B and thereby
 * makes any further DMAE transactions impossible.
 */
static inline void bnx2x_func_reset_port(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_port(bp);
	bnx2x_func_reset_func(bp, drv);
}
/**
 * bnx2x_func_reset_cmn - reset HW at common stage
 *
 * @bp:		device handle
 * @drv:	driver specific operations
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
 */
static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	bnx2x_func_reset_port(bp, drv);
	drv->reset_hw_cmn(bp);
}
static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
				      struct bnx2x_func_state_params *params)
{
	u32 reset_phase = params->params.hw_reset.reset_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;

	DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
	   reset_phase);

	switch (reset_phase) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_func_reset_cmn(bp, drv);
		break;
	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_func_reset_port(bp, drv);
		break;
	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_func_reset_func(bp, drv);
		break;
	default:
		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
			  reset_phase);
		break;
	}

	/* Complete the command immediately: no ramrods have been sent. */
	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);

	return 0;
}
static inline int bnx2x_func_send_start(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_start_data *rdata =
		(struct function_start_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_start_params *start_params = &params->params.start;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->function_mode = cpu_to_le16(start_params->mf_mode);
	rdata->sd_vlan_tag = start_params->sd_vlan_tag;
	rdata->path_id = BP_PATH(bp);
	rdata->network_cos_mode = start_params->network_cos_mode;

	/* No need for an explicit memory barrier here: ordering between
	 * writing the SPQ element and updating the SPQ producer (which
	 * involves a memory read) is enforced by the full memory barrier
	 * inside bnx2x_sp_post().
	 */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
static inline int bnx2x_func_send_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}
static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
					  struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}
static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
					   struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct flow_control_configuration *rdata =
		(struct flow_control_configuration *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_tx_start_params *tx_start_params =
		&params->params.tx_start;
	int i;

	memset(rdata, 0, sizeof(*rdata));

	rdata->dcb_enabled = tx_start_params->dcb_enabled;
	rdata->dcb_version = tx_start_params->dcb_version;
	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;

	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
		rdata->traffic_type_to_priority_cos[i] =
			tx_start_params->traffic_type_to_priority_cos[i];

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
static int bnx2x_func_send_cmd(struct bnx2x *bp,
			       struct bnx2x_func_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_F_CMD_HW_INIT:
		return bnx2x_func_hw_init(bp, params);
	case BNX2X_F_CMD_START:
		return bnx2x_func_send_start(bp, params);
	case BNX2X_F_CMD_STOP:
		return bnx2x_func_send_stop(bp, params);
	case BNX2X_F_CMD_HW_RESET:
		return bnx2x_func_hw_reset(bp, params);
	case BNX2X_F_CMD_TX_STOP:
		return bnx2x_func_send_tx_stop(bp, params);
	case BNX2X_F_CMD_TX_START:
		return bnx2x_func_send_tx_start(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
void bnx2x_init_func_obj(struct bnx2x *bp,
			 struct bnx2x_func_sp_obj *obj,
			 void *rdata, dma_addr_t rdata_mapping,
			 struct bnx2x_func_sp_drv_ops *drv_iface)
{
	memset(obj, 0, sizeof(*obj));

	mutex_init(&obj->one_pending_mutex);

	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;

	obj->send_cmd = bnx2x_func_send_cmd;
	obj->check_transition = bnx2x_func_chk_transition;
	obj->complete_cmd = bnx2x_func_comp_cmd;
	obj->wait_comp = bnx2x_func_wait_comp;

	obj->drv = drv_iface;
}
/**
 * bnx2x_func_state_change - perform Function state change transition
 *
 * @bp:		device handle
 * @params:	parameters to perform the transaction
 *
 * returns 0 in case of successfully completed transition,
 *         negative error code in case of failure, positive
 *         (EBUSY) value if there is a completion that is
 *         still pending (possible only if RAMROD_COMP_WAIT is
 *         not set in params->ramrod_flags for asynchronous
 *         commands).
 */
int bnx2x_func_state_change(struct bnx2x *bp,
			    struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	int rc;
	enum bnx2x_func_cmd cmd = params->cmd;
	unsigned long *pending = &o->pending;

	mutex_lock(&o->one_pending_mutex);

	/* Check that the requested transition is legal */
	if (o->check_transition(bp, o, params)) {
		mutex_unlock(&o->one_pending_mutex);
		return -EINVAL;
	}

	/* Set "pending" bit */
	set_bit(cmd, pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		bnx2x_func_state_change_comp(bp, o, cmd);
		mutex_unlock(&o->one_pending_mutex);
	} else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);

		mutex_unlock(&o->one_pending_mutex);

		if (rc) {
			o->next_state = BNX2X_F_STATE_MAX;
			clear_bit(cmd, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, cmd);
			if (rc)
				return rc;

			return 0;
		}
	}

	return !!test_bit(cmd, pending);
}
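
/* Illustrative usage sketch (hypothetical caller, not a call site in this
 * file): a synchronous FUNCTION START request would look roughly like:
 *
 *	struct bnx2x_func_state_params func_params;
 *
 *	memset(&func_params, 0, sizeof(func_params));
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_START;
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	rc = bnx2x_func_state_change(bp, &func_params);
 *
 * with func_params.params.start assumed to be filled by the caller before
 * the call; RAMROD_COMP_WAIT makes the call block until the ramrod
 * completes.
 */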