bnx2x: use FW 7.8.17
linux-2.6/btrfs-unstable.git: drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
1 /* bnx2x_sp.c: Broadcom Everest network driver.
3 * Copyright (c) 2011-2013 Broadcom Corporation
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Vladislav Zolotarov
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/module.h>
23 #include <linux/crc32.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/crc32c.h>
27 #include "bnx2x.h"
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_sp.h"
31 #define BNX2X_MAX_EMUL_MULTI 16
33 /**** Exe Queue interfaces ****/
35 /**
36 * bnx2x_exe_queue_init - init the Exe Queue object
38 * @o: pointer to the object
39 * @exe_len: length of the execution chunk
40 * @owner: pointer to the owner
41 * @validate: validate function pointer
42 * @optimize: optimize function pointer
43 * @exec: execute function pointer
44 * @get: get function pointer
46 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
47 struct bnx2x_exe_queue_obj *o,
48 int exe_len,
49 union bnx2x_qable_obj *owner,
50 exe_q_validate validate,
51 exe_q_remove remove,
52 exe_q_optimize optimize,
53 exe_q_execute exec,
54 exe_q_get get)
56 memset(o, 0, sizeof(*o));
58 INIT_LIST_HEAD(&o->exe_queue);
59 INIT_LIST_HEAD(&o->pending_comp);
61 spin_lock_init(&o->lock);
63 o->exe_chunk_len = exe_len;
64 o->owner = owner;
66 /* Owner specific callbacks */
67 o->validate = validate;
68 o->remove = remove;
69 o->optimize = optimize;
70 o->execute = exec;
71 o->get = get;
73 DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
74 exe_len);
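/* Note: the five callbacks wired above define the exe-queue life cycle:
 * 'validate' and 'optimize' run when a command is added, 'execute' sends a
 * chunk of up to exe_chunk_len commands, 'remove' returns credits when a
 * queued command is dropped, and 'get' looks a command up in the queue.
 */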
77 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
78 struct bnx2x_exeq_elem *elem)
80 DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
81 kfree(elem);
84 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
86 struct bnx2x_exeq_elem *elem;
87 int cnt = 0;
89 spin_lock_bh(&o->lock);
91 list_for_each_entry(elem, &o->exe_queue, link)
92 cnt++;
94 spin_unlock_bh(&o->lock);
96 return cnt;
99 /**
100 * bnx2x_exe_queue_add - add a new element to the execution queue
102 * @bp: driver handle
103 * @o: queue
104 * @cmd: new command to add
105 * @restore: true - do not optimize the command
107 * If the element is optimized or is illegal, frees it.
109 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
110 struct bnx2x_exe_queue_obj *o,
111 struct bnx2x_exeq_elem *elem,
112 bool restore)
114 int rc;
116 spin_lock_bh(&o->lock);
118 if (!restore) {
119 /* Try to cancel this element queue */
120 rc = o->optimize(bp, o->owner, elem);
121 if (rc)
122 goto free_and_exit;
124 /* Check if this request is ok */
125 rc = o->validate(bp, o->owner, elem);
126 if (rc) {
127 DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
128 goto free_and_exit;
132 /* If so, add it to the execution queue */
133 list_add_tail(&elem->link, &o->exe_queue);
135 spin_unlock_bh(&o->lock);
137 return 0;
139 free_and_exit:
140 bnx2x_exe_queue_free_elem(bp, elem);
142 spin_unlock_bh(&o->lock);
144 return rc;
148 static inline void __bnx2x_exe_queue_reset_pending(
149 struct bnx2x *bp,
150 struct bnx2x_exe_queue_obj *o)
152 struct bnx2x_exeq_elem *elem;
154 while (!list_empty(&o->pending_comp)) {
155 elem = list_first_entry(&o->pending_comp,
156 struct bnx2x_exeq_elem, link);
158 list_del(&elem->link);
159 bnx2x_exe_queue_free_elem(bp, elem);
163 static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
164 struct bnx2x_exe_queue_obj *o)
167 spin_lock_bh(&o->lock);
169 __bnx2x_exe_queue_reset_pending(bp, o);
171 spin_unlock_bh(&o->lock);
176 * bnx2x_exe_queue_step - execute one execution chunk atomically
178 * @bp: driver handle
179 * @o: queue
180 * @ramrod_flags: flags
182 * (Atomicity is ensured using the exe_queue->lock).
184 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
185 struct bnx2x_exe_queue_obj *o,
186 unsigned long *ramrod_flags)
188 struct bnx2x_exeq_elem *elem, spacer;
189 int cur_len = 0, rc;
191 memset(&spacer, 0, sizeof(spacer));
193 spin_lock_bh(&o->lock);
196 * Next step should not be performed until the current is finished,
197 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
198 * properly clear object internals without sending any command to the FW
199 * which also implies there won't be any completion to clear the
200 * 'pending' list.
202 if (!list_empty(&o->pending_comp)) {
203 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
204 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
205 __bnx2x_exe_queue_reset_pending(bp, o);
206 } else {
207 spin_unlock_bh(&o->lock);
208 return 1;
213 * Run through the pending commands list and build the next
214 * execution chunk.
216 while (!list_empty(&o->exe_queue)) {
217 elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
218 link);
219 WARN_ON(!elem->cmd_len);
221 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
222 cur_len += elem->cmd_len;
224 * Prevent both lists from being empty when moving an
225 * element. This will allow the call of
226 * bnx2x_exe_queue_empty() without locking.
228 list_add_tail(&spacer.link, &o->pending_comp);
229 mb();
230 list_move_tail(&elem->link, &o->pending_comp);
231 list_del(&spacer.link);
232 } else
233 break;
236 /* Sanity check */
237 if (!cur_len) {
238 spin_unlock_bh(&o->lock);
239 return 0;
242 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
243 if (rc < 0)
245 * In case of an error return the commands back to the queue
246 * and reset the pending_comp.
248 list_splice_init(&o->pending_comp, &o->exe_queue);
249 else if (!rc)
251 * If zero is returned, it means there are no outstanding pending
252 * completions and we may dismiss the pending list.
254 __bnx2x_exe_queue_reset_pending(bp, o);
256 spin_unlock_bh(&o->lock);
257 return rc;
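/* Return convention for bnx2x_exe_queue_step(): negative on execution
 * failure (the chunk is spliced back onto the queue), 0 when nothing is
 * left pending, and 1 when a completion is still outstanding on the
 * pending_comp list.
 */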
260 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
262 bool empty = list_empty(&o->exe_queue);
264 /* Don't reorder!!! */
265 mb();
267 return empty && list_empty(&o->pending_comp);
270 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
271 struct bnx2x *bp)
273 DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
274 return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
277 /************************ raw_obj functions ***********************************/
278 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
280 return !!test_bit(o->state, o->pstate);
283 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
285 smp_mb__before_clear_bit();
286 clear_bit(o->state, o->pstate);
287 smp_mb__after_clear_bit();
290 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
292 smp_mb__before_clear_bit();
293 set_bit(o->state, o->pstate);
294 smp_mb__after_clear_bit();
298 * bnx2x_state_wait - wait until the given bit(state) is cleared
300 * @bp: device handle
301 * @state: state which is to be cleared
302 * @state_p: state buffer
305 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
306 unsigned long *pstate)
308 /* can take a while if any port is running */
309 int cnt = 5000;
312 if (CHIP_REV_IS_EMUL(bp))
313 cnt *= 20;
315 DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
317 might_sleep();
318 while (cnt--) {
319 if (!test_bit(state, pstate)) {
320 #ifdef BNX2X_STOP_ON_ERROR
321 DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
322 #endif
323 return 0;
326 usleep_range(1000, 2000);
328 if (bp->panic)
329 return -EIO;
332 /* timeout! */
333 BNX2X_ERR("timeout waiting for state %d\n", state);
334 #ifdef BNX2X_STOP_ON_ERROR
335 bnx2x_panic();
336 #endif
338 return -EBUSY;
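/* Timing note: 5000 polls of usleep_range(1000, 2000) bound the wait to
 * roughly 5-10 seconds (20 times longer on emulation).
 */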
341 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
343 return bnx2x_state_wait(bp, raw->state, raw->pstate);
346 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
347 /* credit handling callbacks */
348 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
350 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
352 WARN_ON(!mp);
354 return mp->get_entry(mp, offset);
357 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
359 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
361 WARN_ON(!mp);
363 return mp->get(mp, 1);
366 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
368 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
370 WARN_ON(!vp);
372 return vp->get_entry(vp, offset);
375 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
377 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
379 WARN_ON(!vp);
381 return vp->get(vp, 1);
384 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
386 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
387 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
389 if (!mp->get(mp, 1))
390 return false;
392 if (!vp->get(vp, 1)) {
393 mp->put(mp, 1);
394 return false;
397 return true;
400 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
402 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
404 return mp->put_entry(mp, offset);
407 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
409 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
411 return mp->put(mp, 1);
414 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
416 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
418 return vp->put_entry(vp, offset);
421 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
423 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
425 return vp->put(vp, 1);
428 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
430 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
431 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
433 if (!mp->put(mp, 1))
434 return false;
436 if (!vp->put(vp, 1)) {
437 mp->get(mp, 1);
438 return false;
441 return true;
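/* The vlan_mac get/put helpers above take or return a MAC credit and a
 * VLAN credit as an atomic pair: if the second pool operation fails, the
 * first one is rolled back, so a VLAN-MAC command never holds a partial
 * credit.
 */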
444 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
445 int n, u8 *base, u8 stride, u8 size)
447 struct bnx2x_vlan_mac_registry_elem *pos;
448 u8 *next = base;
449 int counter = 0;
451 /* traverse list */
452 list_for_each_entry(pos, &o->head, link) {
453 if (counter < n) {
454 memcpy(next, &pos->u, size);
455 counter++;
456 DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
457 counter, next);
458 next += stride + size;
462 return counter * ETH_ALEN;
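/* Copy layout: each registry entry contributes 'size' bytes and the
 * destination pointer then skips another 'stride' bytes of padding. Note
 * that the return value is counted in ETH_ALEN units regardless of
 * 'size'.
 */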
465 /* check_add() callbacks */
466 static int bnx2x_check_mac_add(struct bnx2x *bp,
467 struct bnx2x_vlan_mac_obj *o,
468 union bnx2x_classification_ramrod_data *data)
470 struct bnx2x_vlan_mac_registry_elem *pos;
472 DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
474 if (!is_valid_ether_addr(data->mac.mac))
475 return -EINVAL;
477 /* Check if a requested MAC already exists */
478 list_for_each_entry(pos, &o->head, link)
479 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
480 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
481 return -EEXIST;
483 return 0;
486 static int bnx2x_check_vlan_add(struct bnx2x *bp,
487 struct bnx2x_vlan_mac_obj *o,
488 union bnx2x_classification_ramrod_data *data)
490 struct bnx2x_vlan_mac_registry_elem *pos;
492 DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
494 list_for_each_entry(pos, &o->head, link)
495 if (data->vlan.vlan == pos->u.vlan.vlan)
496 return -EEXIST;
498 return 0;
501 static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
502 struct bnx2x_vlan_mac_obj *o,
503 union bnx2x_classification_ramrod_data *data)
505 struct bnx2x_vlan_mac_registry_elem *pos;
507 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
508 data->vlan_mac.mac, data->vlan_mac.vlan);
510 list_for_each_entry(pos, &o->head, link)
511 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
512 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
513 ETH_ALEN)) &&
514 (data->vlan_mac.is_inner_mac ==
515 pos->u.vlan_mac.is_inner_mac))
516 return -EEXIST;
518 return 0;
522 /* check_del() callbacks */
523 static struct bnx2x_vlan_mac_registry_elem *
524 bnx2x_check_mac_del(struct bnx2x *bp,
525 struct bnx2x_vlan_mac_obj *o,
526 union bnx2x_classification_ramrod_data *data)
528 struct bnx2x_vlan_mac_registry_elem *pos;
530 DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
532 list_for_each_entry(pos, &o->head, link)
533 if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
534 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
535 return pos;
537 return NULL;
540 static struct bnx2x_vlan_mac_registry_elem *
541 bnx2x_check_vlan_del(struct bnx2x *bp,
542 struct bnx2x_vlan_mac_obj *o,
543 union bnx2x_classification_ramrod_data *data)
545 struct bnx2x_vlan_mac_registry_elem *pos;
547 DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
549 list_for_each_entry(pos, &o->head, link)
550 if (data->vlan.vlan == pos->u.vlan.vlan)
551 return pos;
553 return NULL;
556 static struct bnx2x_vlan_mac_registry_elem *
557 bnx2x_check_vlan_mac_del(struct bnx2x *bp,
558 struct bnx2x_vlan_mac_obj *o,
559 union bnx2x_classification_ramrod_data *data)
561 struct bnx2x_vlan_mac_registry_elem *pos;
563 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
564 data->vlan_mac.mac, data->vlan_mac.vlan);
566 list_for_each_entry(pos, &o->head, link)
567 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
568 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
569 ETH_ALEN)) &&
570 (data->vlan_mac.is_inner_mac ==
571 pos->u.vlan_mac.is_inner_mac))
572 return pos;
574 return NULL;
577 /* check_move() callback */
578 static bool bnx2x_check_move(struct bnx2x *bp,
579 struct bnx2x_vlan_mac_obj *src_o,
580 struct bnx2x_vlan_mac_obj *dst_o,
581 union bnx2x_classification_ramrod_data *data)
583 struct bnx2x_vlan_mac_registry_elem *pos;
584 int rc;
586 /* Check if we can delete the requested configuration from the first
587 * object.
589 pos = src_o->check_del(bp, src_o, data);
591 /* check if configuration can be added */
592 rc = dst_o->check_add(bp, dst_o, data);
594 /* If this classification can not be added (is already set)
595 * or can't be deleted - return an error.
597 if (rc || !pos)
598 return false;
600 return true;
603 static bool bnx2x_check_move_always_err(
604 struct bnx2x *bp,
605 struct bnx2x_vlan_mac_obj *src_o,
606 struct bnx2x_vlan_mac_obj *dst_o,
607 union bnx2x_classification_ramrod_data *data)
609 return false;
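/* Used as the check_move() callback on chips that have no MOVE ramrod
 * (57710/57711), so a MOVE request always fails validation there.
 */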
613 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
615 struct bnx2x_raw_obj *raw = &o->raw;
616 u8 rx_tx_flag = 0;
618 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
619 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
620 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
622 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
623 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
624 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
626 return rx_tx_flag;
630 void bnx2x_set_mac_in_nig(struct bnx2x *bp,
631 bool add, unsigned char *dev_addr, int index)
633 u32 wb_data[2];
634 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
635 NIG_REG_LLH0_FUNC_MEM;
637 if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
638 return;
640 if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
641 return;
643 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
644 (add ? "ADD" : "DELETE"), index);
646 if (add) {
647 /* LLH_FUNC_MEM is a u64 WB register */
648 reg_offset += 8*index;
650 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
651 (dev_addr[4] << 8) | dev_addr[5]);
652 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
654 REG_WR_DMAE(bp, reg_offset, wb_data, 2);
657 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
658 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
662 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
664 * @bp: device handle
665 * @o: queue for which we want to configure this rule
666 * @add: if true the command is an ADD command, DEL otherwise
667 * @opcode: CLASSIFY_RULE_OPCODE_XXX
668 * @hdr: pointer to a header to setup
671 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
672 struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
673 struct eth_classify_cmd_header *hdr)
675 struct bnx2x_raw_obj *raw = &o->raw;
677 hdr->client_id = raw->cl_id;
678 hdr->func_id = raw->func_id;
680 /* Rx or/and Tx (internal switching) configuration ? */
681 hdr->cmd_general_data |=
682 bnx2x_vlan_mac_get_rx_tx_flag(o);
684 if (add)
685 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
687 hdr->cmd_general_data |=
688 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
692 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
694 * @cid: connection id
695 * @type: BNX2X_FILTER_XXX_PENDING
696 * @hdr: pointer to the header to setup
697 * @rule_cnt: number of rules in the ramrod data buffer
699 * Currently we always configure one rule and set the echo field to
700 * contain a CID and an opcode type.
702 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
703 struct eth_classify_header *hdr, int rule_cnt)
705 hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
706 (type << BNX2X_SWCID_SHIFT));
707 hdr->rule_cnt = (u8)rule_cnt;
711 /* hw_config() callbacks */
712 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
713 struct bnx2x_vlan_mac_obj *o,
714 struct bnx2x_exeq_elem *elem, int rule_idx,
715 int cam_offset)
717 struct bnx2x_raw_obj *raw = &o->raw;
718 struct eth_classify_rules_ramrod_data *data =
719 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
720 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
721 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
722 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
723 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
724 u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
727 * Set LLH CAM entry: currently only iSCSI and ETH macs are
728 * relevant. In addition, current implementation is tuned for a
729 * single ETH MAC.
731 * When a PF configuration with multiple unicast ETH MACs is
732 * required in switch-independent mode (NetQ, multiple netdev
733 * MACs, etc.), consider better utilisation of the 8 per-function
734 * MAC entries in the LLH register. There are also
735 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the total
736 * number of CAM entries to 16.
738 * Currently we won't configure NIG for MACs other than a primary ETH
739 * MAC and iSCSI L2 MAC.
741 * If this MAC is moving from one Queue to another, no need to change
742 * NIG configuration.
744 if (cmd != BNX2X_VLAN_MAC_MOVE) {
745 if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
746 bnx2x_set_mac_in_nig(bp, add, mac,
747 BNX2X_LLH_CAM_ISCSI_ETH_LINE);
748 else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
749 bnx2x_set_mac_in_nig(bp, add, mac,
750 BNX2X_LLH_CAM_ETH_LINE);
753 /* Reset the ramrod data buffer for the first rule */
754 if (rule_idx == 0)
755 memset(data, 0, sizeof(*data));
757 /* Setup a command header */
758 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
759 &rule_entry->mac.header);
761 DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
762 (add ? "add" : "delete"), mac, raw->cl_id);
764 /* Set a MAC itself */
765 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
766 &rule_entry->mac.mac_mid,
767 &rule_entry->mac.mac_lsb, mac);
768 rule_entry->mac.inner_mac =
769 cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
771 /* MOVE: Add a rule that will add this MAC to the target Queue */
772 if (cmd == BNX2X_VLAN_MAC_MOVE) {
773 rule_entry++;
774 rule_cnt++;
776 /* Setup ramrod data */
777 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
778 elem->cmd_data.vlan_mac.target_obj,
779 true, CLASSIFY_RULE_OPCODE_MAC,
780 &rule_entry->mac.header);
782 /* Set a MAC itself */
783 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
784 &rule_entry->mac.mac_mid,
785 &rule_entry->mac.mac_lsb, mac);
786 rule_entry->mac.inner_mac =
787 cpu_to_le16(elem->cmd_data.vlan_mac.
788 u.mac.is_inner_mac);
791 /* Set the ramrod data header */
792 /* TODO: take this to the higher level in order to prevent multiple
793 writes */
794 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
795 rule_cnt);
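/* A MOVE therefore fills two consecutive rules (a delete rule for the
 * source object followed by an add rule for the target), which is why
 * bnx2x_vlan_mac_push_new_cmd() assigns MOVE commands a cmd_len of 2.
 */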
799 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
801 * @bp: device handle
802 * @o: queue
803 * @type: BNX2X_FILTER_XXX_PENDING
804 * @cam_offset: offset in cam memory
805 * @hdr: pointer to a header to setup
807 * E1/E1H
809 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
810 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
811 struct mac_configuration_hdr *hdr)
813 struct bnx2x_raw_obj *r = &o->raw;
815 hdr->length = 1;
816 hdr->offset = (u8)cam_offset;
817 hdr->client_id = cpu_to_le16(0xff);
818 hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
819 (type << BNX2X_SWCID_SHIFT));
822 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
823 struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
824 u16 vlan_id, struct mac_configuration_entry *cfg_entry)
826 struct bnx2x_raw_obj *r = &o->raw;
827 u32 cl_bit_vec = (1 << r->cl_id);
829 cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
830 cfg_entry->pf_id = r->func_id;
831 cfg_entry->vlan_id = cpu_to_le16(vlan_id);
833 if (add) {
834 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
835 T_ETH_MAC_COMMAND_SET);
836 SET_FLAG(cfg_entry->flags,
837 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
839 /* Set a MAC in a ramrod data */
840 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
841 &cfg_entry->middle_mac_addr,
842 &cfg_entry->lsb_mac_addr, mac);
843 } else
844 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
845 T_ETH_MAC_COMMAND_INVALIDATE);
848 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
849 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
850 u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
852 struct mac_configuration_entry *cfg_entry = &config->config_table[0];
853 struct bnx2x_raw_obj *raw = &o->raw;
855 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
856 &config->hdr);
857 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
858 cfg_entry);
860 DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
861 (add ? "setting" : "clearing"),
862 mac, raw->cl_id, cam_offset);
866 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
868 * @bp: device handle
869 * @o: bnx2x_vlan_mac_obj
870 * @elem: bnx2x_exeq_elem
871 * @rule_idx: rule_idx
872 * @cam_offset: cam_offset
874 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
875 struct bnx2x_vlan_mac_obj *o,
876 struct bnx2x_exeq_elem *elem, int rule_idx,
877 int cam_offset)
879 struct bnx2x_raw_obj *raw = &o->raw;
880 struct mac_configuration_cmd *config =
881 (struct mac_configuration_cmd *)(raw->rdata);
883 * 57710 and 57711 do not support MOVE command,
884 * so it's either ADD or DEL
886 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
887 true : false;
889 /* Reset the ramrod data buffer */
890 memset(config, 0, sizeof(*config));
892 bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
893 cam_offset, add,
894 elem->cmd_data.vlan_mac.u.mac.mac, 0,
895 ETH_VLAN_FILTER_ANY_VLAN, config);
898 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
899 struct bnx2x_vlan_mac_obj *o,
900 struct bnx2x_exeq_elem *elem, int rule_idx,
901 int cam_offset)
903 struct bnx2x_raw_obj *raw = &o->raw;
904 struct eth_classify_rules_ramrod_data *data =
905 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
906 int rule_cnt = rule_idx + 1;
907 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
908 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
909 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
910 u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
912 /* Reset the ramrod data buffer for the first rule */
913 if (rule_idx == 0)
914 memset(data, 0, sizeof(*data));
916 /* Set a rule header */
917 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
918 &rule_entry->vlan.header);
920 DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
921 vlan);
923 /* Set a VLAN itself */
924 rule_entry->vlan.vlan = cpu_to_le16(vlan);
926 /* MOVE: Add a rule that will add this MAC to the target Queue */
927 if (cmd == BNX2X_VLAN_MAC_MOVE) {
928 rule_entry++;
929 rule_cnt++;
931 /* Setup ramrod data */
932 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
933 elem->cmd_data.vlan_mac.target_obj,
934 true, CLASSIFY_RULE_OPCODE_VLAN,
935 &rule_entry->vlan.header);
937 /* Set a VLAN itself */
938 rule_entry->vlan.vlan = cpu_to_le16(vlan);
941 /* Set the ramrod data header */
942 /* TODO: take this to the higher level in order to prevent multiple
943 writes */
944 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
945 rule_cnt);
948 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
949 struct bnx2x_vlan_mac_obj *o,
950 struct bnx2x_exeq_elem *elem,
951 int rule_idx, int cam_offset)
953 struct bnx2x_raw_obj *raw = &o->raw;
954 struct eth_classify_rules_ramrod_data *data =
955 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
956 int rule_cnt = rule_idx + 1;
957 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
958 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
959 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
960 u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
961 u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
964 /* Reset the ramrod data buffer for the first rule */
965 if (rule_idx == 0)
966 memset(data, 0, sizeof(*data));
968 /* Set a rule header */
969 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
970 &rule_entry->pair.header);
972 /* Set VLAN and MAC themselves */
973 rule_entry->pair.vlan = cpu_to_le16(vlan);
974 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
975 &rule_entry->pair.mac_mid,
976 &rule_entry->pair.mac_lsb, mac);
977 rule_entry->pair.inner_mac =
978 cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
979 /* MOVE: Add a rule that will add this MAC to the target Queue */
980 if (cmd == BNX2X_VLAN_MAC_MOVE) {
981 rule_entry++;
982 rule_cnt++;
984 /* Setup ramrod data */
985 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
986 elem->cmd_data.vlan_mac.target_obj,
987 true, CLASSIFY_RULE_OPCODE_PAIR,
988 &rule_entry->pair.header);
990 /* Set a VLAN itself */
991 rule_entry->pair.vlan = cpu_to_le16(vlan);
992 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
993 &rule_entry->pair.mac_mid,
994 &rule_entry->pair.mac_lsb, mac);
995 rule_entry->pair.inner_mac =
996 cpu_to_le16(elem->cmd_data.vlan_mac.u.
997 vlan_mac.is_inner_mac);
1000 /* Set the ramrod data header */
1001 /* TODO: take this to the higher level in order to prevent multiple
1002 writes */
1003 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1004 rule_cnt);
1008 * bnx2x_set_one_vlan_mac_e1h -
1010 * @bp: device handle
1011 * @o: bnx2x_vlan_mac_obj
1012 * @elem: bnx2x_exeq_elem
1013 * @rule_idx: rule_idx
1014 * @cam_offset: cam_offset
1016 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1017 struct bnx2x_vlan_mac_obj *o,
1018 struct bnx2x_exeq_elem *elem,
1019 int rule_idx, int cam_offset)
1021 struct bnx2x_raw_obj *raw = &o->raw;
1022 struct mac_configuration_cmd *config =
1023 (struct mac_configuration_cmd *)(raw->rdata);
1025 * 57710 and 57711 do not support MOVE command,
1026 * so it's either ADD or DEL
1028 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1029 true : false;
1031 /* Reset the ramrod data buffer */
1032 memset(config, 0, sizeof(*config));
1034 bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
1035 cam_offset, add,
1036 elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1037 elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1038 ETH_VLAN_FILTER_CLASSIFY, config);
1041 #define list_next_entry(pos, member) \
1042 list_entry((pos)->member.next, typeof(*(pos)), member)
1045 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1047 * @bp: device handle
1048 * @p: command parameters
1049 * @ppos: pointer to the cookie
1051 * Reconfigure the next MAC/VLAN/VLAN-MAC element from the
1052 * previously configured elements list.
1054 * Of the command parameters, only the RAMROD_COMP_WAIT bit in ramrod_flags
1055 * is taken into account.
1057 * The cookie should be given back in the next call to make the function
1058 * handle the next element. If *ppos is set to NULL it will restart the
1059 * iterator. If the returned *ppos == NULL, the last element has been
1060 * handled.
1063 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1064 struct bnx2x_vlan_mac_ramrod_params *p,
1065 struct bnx2x_vlan_mac_registry_elem **ppos)
1067 struct bnx2x_vlan_mac_registry_elem *pos;
1068 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1070 /* If list is empty - there is nothing to do here */
1071 if (list_empty(&o->head)) {
1072 *ppos = NULL;
1073 return 0;
1076 /* make a step... */
1077 if (*ppos == NULL)
1078 *ppos = list_first_entry(&o->head,
1079 struct bnx2x_vlan_mac_registry_elem,
1080 link);
1081 else
1082 *ppos = list_next_entry(*ppos, link);
1084 pos = *ppos;
1086 /* If it's the last step - return NULL */
1087 if (list_is_last(&pos->link, &o->head))
1088 *ppos = NULL;
1090 /* Prepare a 'user_req' */
1091 memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1093 /* Set the command */
1094 p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1096 /* Set vlan_mac_flags */
1097 p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1099 /* Set a restore bit */
1100 __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1102 return bnx2x_config_vlan_mac(bp, p);
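/* A minimal iteration sketch (hypothetical caller): pass the cookie back
 * until it returns NULL. 'p' is assumed to be a prepared
 * bnx2x_vlan_mac_ramrod_params with RAMROD_COMP_WAIT set:
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *		if (rc < 0)
 *			break;
 *	} while (pos);
 */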
1106 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1107 * pointer to an element with specific criteria, or NULL if no such element
1108 * has been found.
1110 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1111 struct bnx2x_exe_queue_obj *o,
1112 struct bnx2x_exeq_elem *elem)
1114 struct bnx2x_exeq_elem *pos;
1115 struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1117 /* Check pending for execution commands */
1118 list_for_each_entry(pos, &o->exe_queue, link)
1119 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1120 sizeof(*data)) &&
1121 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1122 return pos;
1124 return NULL;
1127 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1128 struct bnx2x_exe_queue_obj *o,
1129 struct bnx2x_exeq_elem *elem)
1131 struct bnx2x_exeq_elem *pos;
1132 struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1134 /* Check pending for execution commands */
1135 list_for_each_entry(pos, &o->exe_queue, link)
1136 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1137 sizeof(*data)) &&
1138 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1139 return pos;
1141 return NULL;
1144 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1145 struct bnx2x_exe_queue_obj *o,
1146 struct bnx2x_exeq_elem *elem)
1148 struct bnx2x_exeq_elem *pos;
1149 struct bnx2x_vlan_mac_ramrod_data *data =
1150 &elem->cmd_data.vlan_mac.u.vlan_mac;
1152 /* Check pending for execution commands */
1153 list_for_each_entry(pos, &o->exe_queue, link)
1154 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1155 sizeof(*data)) &&
1156 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1157 return pos;
1159 return NULL;
1163 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1165 * @bp: device handle
1166 * @qo: bnx2x_qable_obj
1167 * @elem: bnx2x_exeq_elem
1169 * Checks that the requested configuration can be added. If yes and if
1170 * requested, consume CAM credit.
1172 * The 'validate' is run after the 'optimize'.
1175 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1176 union bnx2x_qable_obj *qo,
1177 struct bnx2x_exeq_elem *elem)
1179 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1180 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1181 int rc;
1183 /* Check the registry */
1184 rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
1185 if (rc) {
1186 DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
1187 return rc;
1191 * Check if there is a pending ADD command for this
1192 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1194 if (exeq->get(exeq, elem)) {
1195 DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1196 return -EEXIST;
1200 * TODO: Check the pending MOVE from other objects where this
1201 * object is a destination object.
1204 /* Consume the credit if not requested not to */
1205 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1206 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1207 o->get_credit(o)))
1208 return -EINVAL;
1210 return 0;
1214 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1216 * @bp: device handle
1217 * @qo: qable object to check
1218 * @elem: element that needs to be deleted
1220 * Checks that the requested configuration can be deleted. If yes and if
1221 * requested, returns a CAM credit.
1223 * The 'validate' is run after the 'optimize'.
1225 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1226 union bnx2x_qable_obj *qo,
1227 struct bnx2x_exeq_elem *elem)
1229 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1230 struct bnx2x_vlan_mac_registry_elem *pos;
1231 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1232 struct bnx2x_exeq_elem query_elem;
1234 /* If this classification cannot be deleted (it doesn't exist),
1235 * return -EEXIST.
1237 pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1238 if (!pos) {
1239 DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
1240 return -EEXIST;
1244 * Check if there are pending DEL or MOVE commands for this
1245 * MAC/VLAN/VLAN-MAC. Return an error if so.
1247 memcpy(&query_elem, elem, sizeof(query_elem));
1249 /* Check for MOVE commands */
1250 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1251 if (exeq->get(exeq, &query_elem)) {
1252 BNX2X_ERR("There is a pending MOVE command already\n");
1253 return -EINVAL;
1256 /* Check for DEL commands */
1257 if (exeq->get(exeq, elem)) {
1258 DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1259 return -EEXIST;
1262 /* Return the credit to the credit pool if not requested not to */
1263 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1264 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1265 o->put_credit(o))) {
1266 BNX2X_ERR("Failed to return a credit\n");
1267 return -EINVAL;
1270 return 0;
1274 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1276 * @bp: device handle
1277 * @qo: qable object to check (source)
1278 * @elem: element that needs to be moved
1280 * Checks that the requested configuration can be moved. If yes and if
1281 * requested, returns a CAM credit.
1283 * The 'validate' is run after the 'optimize'.
1285 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1286 union bnx2x_qable_obj *qo,
1287 struct bnx2x_exeq_elem *elem)
1289 struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1290 struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1291 struct bnx2x_exeq_elem query_elem;
1292 struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1293 struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1296 * Check if we can perform this operation based on the current registry
1297 * state.
1299 if (!src_o->check_move(bp, src_o, dest_o,
1300 &elem->cmd_data.vlan_mac.u)) {
1301 DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
1302 return -EINVAL;
1306 * Check if there is an already pending DEL or MOVE command for the
1307 * source object or ADD command for a destination object. Return an
1308 * error if so.
1310 memcpy(&query_elem, elem, sizeof(query_elem));
1312 /* Check DEL on source */
1313 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1314 if (src_exeq->get(src_exeq, &query_elem)) {
1315 BNX2X_ERR("There is a pending DEL command on the source queue already\n");
1316 return -EINVAL;
1319 /* Check MOVE on source */
1320 if (src_exeq->get(src_exeq, elem)) {
1321 DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1322 return -EEXIST;
1325 /* Check ADD on destination */
1326 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1327 if (dest_exeq->get(dest_exeq, &query_elem)) {
1328 BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
1329 return -EINVAL;
1332 /* Consume the credit if not requested not to */
1333 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1334 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1335 dest_o->get_credit(dest_o)))
1336 return -EINVAL;
1338 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1339 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1340 src_o->put_credit(src_o))) {
1341 /* return the credit taken from dest... */
1342 dest_o->put_credit(dest_o);
1343 return -EINVAL;
1346 return 0;
1349 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1350 union bnx2x_qable_obj *qo,
1351 struct bnx2x_exeq_elem *elem)
1353 switch (elem->cmd_data.vlan_mac.cmd) {
1354 case BNX2X_VLAN_MAC_ADD:
1355 return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1356 case BNX2X_VLAN_MAC_DEL:
1357 return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1358 case BNX2X_VLAN_MAC_MOVE:
1359 return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1360 default:
1361 return -EINVAL;
1365 static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1366 union bnx2x_qable_obj *qo,
1367 struct bnx2x_exeq_elem *elem)
1369 int rc = 0;
1371 /* If consumption wasn't required, nothing to do */
1372 if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1373 &elem->cmd_data.vlan_mac.vlan_mac_flags))
1374 return 0;
1376 switch (elem->cmd_data.vlan_mac.cmd) {
1377 case BNX2X_VLAN_MAC_ADD:
1378 case BNX2X_VLAN_MAC_MOVE:
1379 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1380 break;
1381 case BNX2X_VLAN_MAC_DEL:
1382 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1383 break;
1384 default:
1385 return -EINVAL;
1388 if (!rc)
1389 return -EINVAL;
1391 return 0;
1395 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1397 * @bp: device handle
1398 * @o: bnx2x_vlan_mac_obj
1401 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1402 struct bnx2x_vlan_mac_obj *o)
1404 int cnt = 5000, rc;
1405 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1406 struct bnx2x_raw_obj *raw = &o->raw;
1408 while (cnt--) {
1409 /* Wait for the current command to complete */
1410 rc = raw->wait_comp(bp, raw);
1411 if (rc)
1412 return rc;
1414 /* Wait until there are no pending commands */
1415 if (!bnx2x_exe_queue_empty(exeq))
1416 usleep_range(1000, 2000);
1417 else
1418 return 0;
1421 return -EBUSY;
1425 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1427 * @bp: device handle
1428 * @o: bnx2x_vlan_mac_obj
1429 * @cqe: completion element
1430 * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
1433 static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1434 struct bnx2x_vlan_mac_obj *o,
1435 union event_ring_elem *cqe,
1436 unsigned long *ramrod_flags)
1438 struct bnx2x_raw_obj *r = &o->raw;
1439 int rc;
1441 /* Reset pending list */
1442 bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1444 /* Clear pending */
1445 r->clear_pending(r);
1447 /* If ramrod failed this is most likely a SW bug */
1448 if (cqe->message.error)
1449 return -EINVAL;
1451 /* Run the next bulk of pending commands if requested */
1452 if (test_bit(RAMROD_CONT, ramrod_flags)) {
1453 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1454 if (rc < 0)
1455 return rc;
1458 /* If there is more work to do return PENDING */
1459 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1460 return 1;
1462 return 0;
1466 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1468 * @bp: device handle
1469 * @o: bnx2x_qable_obj
1470 * @elem: bnx2x_exeq_elem
1472 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1473 union bnx2x_qable_obj *qo,
1474 struct bnx2x_exeq_elem *elem)
1476 struct bnx2x_exeq_elem query, *pos;
1477 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1478 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1480 memcpy(&query, elem, sizeof(query));
1482 switch (elem->cmd_data.vlan_mac.cmd) {
1483 case BNX2X_VLAN_MAC_ADD:
1484 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1485 break;
1486 case BNX2X_VLAN_MAC_DEL:
1487 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1488 break;
1489 default:
1490 /* Don't handle anything other than ADD or DEL */
1491 return 0;
1494 /* If we found the appropriate element - delete it */
1495 pos = exeq->get(exeq, &query);
1496 if (pos) {
1498 /* Return the credit of the optimized command */
1499 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1500 &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1501 if ((query.cmd_data.vlan_mac.cmd ==
1502 BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1503 BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1504 return -EINVAL;
1505 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1506 BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1507 return -EINVAL;
1511 DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1512 (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1513 "ADD" : "DEL");
1515 list_del(&pos->link);
1516 bnx2x_exe_queue_free_elem(bp, pos);
1517 return 1;
1520 return 0;
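/* In other words, a new ADD cancels out against a queued DEL of the same
 * classification (and vice versa): the queued command is removed, credits
 * are balanced, and the non-zero return tells bnx2x_exe_queue_add() to
 * free the new element instead of queueing it.
 */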
1524 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1526 * @bp: device handle
1527 * @o: vlan_mac object
1528 * @elem: execution queue element holding the command
1529 * @restore: true for a restore flow
1530 * @re: output parameter for the prepared registry element
1532 * prepare a registry element according to the current command request.
1534 static inline int bnx2x_vlan_mac_get_registry_elem(
1535 struct bnx2x *bp,
1536 struct bnx2x_vlan_mac_obj *o,
1537 struct bnx2x_exeq_elem *elem,
1538 bool restore,
1539 struct bnx2x_vlan_mac_registry_elem **re)
1541 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1542 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1544 /* Allocate a new registry element if needed. */
1545 if (!restore &&
1546 ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1547 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1548 if (!reg_elem)
1549 return -ENOMEM;
1551 /* Get a new CAM offset */
1552 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1554 * This should never happen, because we have checked the
1555 * CAM availability in the 'validate'.
1557 WARN_ON(1);
1558 kfree(reg_elem);
1559 return -EINVAL;
1562 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1564 /* Set a VLAN-MAC data */
1565 memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1566 sizeof(reg_elem->u));
1568 /* Copy the flags (needed for DEL and RESTORE flows) */
1569 reg_elem->vlan_mac_flags =
1570 elem->cmd_data.vlan_mac.vlan_mac_flags;
1571 } else /* DEL, RESTORE */
1572 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1574 *re = reg_elem;
1575 return 0;
1579 * bnx2x_execute_vlan_mac - execute vlan mac command
1581 * @bp: device handle
1582 * @qo: qable object
1583 * @exe_chunk: chunk of commands to execute
1584 * @ramrod_flags: execution flags
1586 * go and send a ramrod!
1588 static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1589 union bnx2x_qable_obj *qo,
1590 struct list_head *exe_chunk,
1591 unsigned long *ramrod_flags)
1593 struct bnx2x_exeq_elem *elem;
1594 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1595 struct bnx2x_raw_obj *r = &o->raw;
1596 int rc, idx = 0;
1597 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1598 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1599 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1600 enum bnx2x_vlan_mac_cmd cmd;
1603 * If DRIVER_ONLY execution is requested, clean up the registry
1604 * and exit. Otherwise send a ramrod to the FW.
1606 if (!drv_only) {
1607 WARN_ON(r->check_pending(r));
1609 /* Set pending */
1610 r->set_pending(r);
1612 /* Fill the ramrod data */
1613 list_for_each_entry(elem, exe_chunk, link) {
1614 cmd = elem->cmd_data.vlan_mac.cmd;
1616 * We will add to the target object in MOVE command, so
1617 * change the object for a CAM search.
1619 if (cmd == BNX2X_VLAN_MAC_MOVE)
1620 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1621 else
1622 cam_obj = o;
1624 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1625 elem, restore,
1626 &reg_elem);
1627 if (rc)
1628 goto error_exit;
1630 WARN_ON(!reg_elem);
1632 /* Push a new entry into the registry */
1633 if (!restore &&
1634 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1635 (cmd == BNX2X_VLAN_MAC_MOVE)))
1636 list_add(&reg_elem->link, &cam_obj->head);
1638 /* Configure a single command in a ramrod data buffer */
1639 o->set_one_rule(bp, o, elem, idx,
1640 reg_elem->cam_offset);
1642 /* MOVE command consumes 2 entries in the ramrod data */
1643 if (cmd == BNX2X_VLAN_MAC_MOVE)
1644 idx += 2;
1645 else
1646 idx++;
1650 * No need for an explicit memory barrier here: we need to ensure
1651 * the ordering of writing to the SPQ element and updating of the
1652 * SPQ producer, which involves a memory read, and the full memory
1653 * barrier required for that is already placed inside
1654 * bnx2x_sp_post().
1657 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1658 U64_HI(r->rdata_mapping),
1659 U64_LO(r->rdata_mapping),
1660 ETH_CONNECTION_TYPE);
1661 if (rc)
1662 goto error_exit;
1665 /* Now, when we are done with the ramrod - clean up the registry */
1666 list_for_each_entry(elem, exe_chunk, link) {
1667 cmd = elem->cmd_data.vlan_mac.cmd;
1668 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1669 (cmd == BNX2X_VLAN_MAC_MOVE)) {
1670 reg_elem = o->check_del(bp, o,
1671 &elem->cmd_data.vlan_mac.u);
1673 WARN_ON(!reg_elem);
1675 o->put_cam_offset(o, reg_elem->cam_offset);
1676 list_del(&reg_elem->link);
1677 kfree(reg_elem);
1681 if (!drv_only)
1682 return 1;
1683 else
1684 return 0;
1686 error_exit:
1687 r->clear_pending(r);
1689 /* Cleanup a registry in case of a failure */
1690 list_for_each_entry(elem, exe_chunk, link) {
1691 cmd = elem->cmd_data.vlan_mac.cmd;
1693 if (cmd == BNX2X_VLAN_MAC_MOVE)
1694 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1695 else
1696 cam_obj = o;
1698 /* Delete all newly added above entries */
1699 if (!restore &&
1700 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1701 (cmd == BNX2X_VLAN_MAC_MOVE))) {
1702 reg_elem = o->check_del(bp, cam_obj,
1703 &elem->cmd_data.vlan_mac.u);
1704 if (reg_elem) {
1705 list_del(&reg_elem->link);
1706 kfree(reg_elem);
1711 return rc;
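/* Return convention: 1 when a ramrod was posted and a completion is
 * pending, 0 for the DRV_CLR_ONLY (registry-only) flow, and a negative
 * value after the registry has been rolled back on failure.
 */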
1714 static inline int bnx2x_vlan_mac_push_new_cmd(
1715 struct bnx2x *bp,
1716 struct bnx2x_vlan_mac_ramrod_params *p)
1718 struct bnx2x_exeq_elem *elem;
1719 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1720 bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1722 /* Allocate the execution queue element */
1723 elem = bnx2x_exe_queue_alloc_elem(bp);
1724 if (!elem)
1725 return -ENOMEM;
1727 /* Set the command 'length' */
1728 switch (p->user_req.cmd) {
1729 case BNX2X_VLAN_MAC_MOVE:
1730 elem->cmd_len = 2;
1731 break;
1732 default:
1733 elem->cmd_len = 1;
1736 /* Fill the object specific info */
1737 memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1739 /* Try to add a new command to the pending list */
1740 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1744 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1746 * @bp: device handle
1747 * @p: command parameters
1750 int bnx2x_config_vlan_mac(
1751 struct bnx2x *bp,
1752 struct bnx2x_vlan_mac_ramrod_params *p)
1754 int rc = 0;
1755 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1756 unsigned long *ramrod_flags = &p->ramrod_flags;
1757 bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1758 struct bnx2x_raw_obj *raw = &o->raw;
1761 * Add new elements to the execution list for commands that require it.
1763 if (!cont) {
1764 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1765 if (rc)
1766 return rc;
1770 * If nothing will be executed further in this iteration we want to
1771 * return PENDING if there are pending commands
1773 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1774 rc = 1;
1776 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1777 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1778 raw->clear_pending(raw);
1781 /* Execute commands if required */
1782 if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1783 test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1784 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1785 if (rc < 0)
1786 return rc;
1790 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set,
1791 * the user wants to wait until the last command is done.
1793 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1795 * Wait at most for the current exe_queue length plus one
1796 * iterations (the extra one is for the current pending command).
1798 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1800 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1801 max_iterations--) {
1803 /* Wait for the current command to complete */
1804 rc = raw->wait_comp(bp, raw);
1805 if (rc)
1806 return rc;
1808 /* Make a next step */
1809 rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
1810 ramrod_flags);
1811 if (rc < 0)
1812 return rc;
1815 return 0;
1818 return rc;
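/* A minimal usage sketch (hypothetical caller), assuming 'mac_obj' points
 * to an initialized bnx2x_vlan_mac_obj and 'addr' holds the MAC address to
 * classify; error handling is elided:
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *	int rc;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = bnx2x_config_vlan_mac(bp, &p);
 */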
1824 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1826 * @bp: device handle
1827 * @o: vlan_mac object
1828 * @vlan_mac_flags: specification of the elements to delete
1829 * @ramrod_flags: execution flags to be used for this deletion
1831 * Returns zero if the last operation has completed successfully and there
1832 * are no more elements left, a positive value if it completed successfully
1833 * and there are more previously configured elements, and a negative value
1834 * if the current operation has failed.
1836 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1837 struct bnx2x_vlan_mac_obj *o,
1838 unsigned long *vlan_mac_flags,
1839 unsigned long *ramrod_flags)
1841 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1842 int rc = 0;
1843 struct bnx2x_vlan_mac_ramrod_params p;
1844 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1845 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1847 /* Clear pending commands first */
1849 spin_lock_bh(&exeq->lock);
1851 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1852 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1853 *vlan_mac_flags) {
1854 rc = exeq->remove(bp, exeq->owner, exeq_pos);
1855 if (rc) {
1856 BNX2X_ERR("Failed to remove command\n");
1857 spin_unlock_bh(&exeq->lock);
1858 return rc;
1860 list_del(&exeq_pos->link);
1861 bnx2x_exe_queue_free_elem(bp, exeq_pos);
1865 spin_unlock_bh(&exeq->lock);
1867 /* Prepare a command request */
1868 memset(&p, 0, sizeof(p));
1869 p.vlan_mac_obj = o;
1870 p.ramrod_flags = *ramrod_flags;
1871 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1874 * Add all but the last VLAN-MAC to the execution queue without actually
1875 * executing anything.
1877 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1878 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1879 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1881 list_for_each_entry(pos, &o->head, link) {
1882 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1883 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1884 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1885 rc = bnx2x_config_vlan_mac(bp, &p);
1886 if (rc < 0) {
1887 BNX2X_ERR("Failed to add a new DEL command\n");
1888 return rc;
1893 p.ramrod_flags = *ramrod_flags;
1894 __set_bit(RAMROD_CONT, &p.ramrod_flags);
1896 return bnx2x_config_vlan_mac(bp, &p);
1899 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1900 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1901 unsigned long *pstate, bnx2x_obj_type type)
1903 raw->func_id = func_id;
1904 raw->cid = cid;
1905 raw->cl_id = cl_id;
1906 raw->rdata = rdata;
1907 raw->rdata_mapping = rdata_mapping;
1908 raw->state = state;
1909 raw->pstate = pstate;
1910 raw->obj_type = type;
1911 raw->check_pending = bnx2x_raw_check_pending;
1912 raw->clear_pending = bnx2x_raw_clear_pending;
1913 raw->set_pending = bnx2x_raw_set_pending;
1914 raw->wait_comp = bnx2x_raw_wait;
1917 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1918 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1919 int state, unsigned long *pstate, bnx2x_obj_type type,
1920 struct bnx2x_credit_pool_obj *macs_pool,
1921 struct bnx2x_credit_pool_obj *vlans_pool)
1923 INIT_LIST_HEAD(&o->head);
1925 o->macs_pool = macs_pool;
1926 o->vlans_pool = vlans_pool;
1928 o->delete_all = bnx2x_vlan_mac_del_all;
1929 o->restore = bnx2x_vlan_mac_restore;
1930 o->complete = bnx2x_complete_vlan_mac;
1931 o->wait = bnx2x_wait_vlan_mac;
1933 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1934 state, pstate, type);
1938 void bnx2x_init_mac_obj(struct bnx2x *bp,
1939 struct bnx2x_vlan_mac_obj *mac_obj,
1940 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1941 dma_addr_t rdata_mapping, int state,
1942 unsigned long *pstate, bnx2x_obj_type type,
1943 struct bnx2x_credit_pool_obj *macs_pool)
1945 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1947 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1948 rdata_mapping, state, pstate, type,
1949 macs_pool, NULL);
1951 /* CAM credit pool handling */
1952 mac_obj->get_credit = bnx2x_get_credit_mac;
1953 mac_obj->put_credit = bnx2x_put_credit_mac;
1954 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1955 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1957 if (CHIP_IS_E1x(bp)) {
1958 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1959 mac_obj->check_del = bnx2x_check_mac_del;
1960 mac_obj->check_add = bnx2x_check_mac_add;
1961 mac_obj->check_move = bnx2x_check_move_always_err;
1962 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1964 /* Exe Queue */
1965 bnx2x_exe_queue_init(bp,
1966 &mac_obj->exe_queue, 1, qable_obj,
1967 bnx2x_validate_vlan_mac,
1968 bnx2x_remove_vlan_mac,
1969 bnx2x_optimize_vlan_mac,
1970 bnx2x_execute_vlan_mac,
1971 bnx2x_exeq_get_mac);
1972 } else {
1973 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1974 mac_obj->check_del = bnx2x_check_mac_del;
1975 mac_obj->check_add = bnx2x_check_mac_add;
1976 mac_obj->check_move = bnx2x_check_move;
1977 mac_obj->ramrod_cmd =
1978 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1979 mac_obj->get_n_elements = bnx2x_get_n_elements;
1981 /* Exe Queue */
1982 bnx2x_exe_queue_init(bp,
1983 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1984 qable_obj, bnx2x_validate_vlan_mac,
1985 bnx2x_remove_vlan_mac,
1986 bnx2x_optimize_vlan_mac,
1987 bnx2x_execute_vlan_mac,
1988 bnx2x_exeq_get_mac);
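/* Note the chunk lengths: the E1x queue uses a chunk of 1, matching the
 * single-entry mac_configuration_cmd built by bnx2x_set_one_mac_e1x(),
 * while E2 batches up to CLASSIFY_RULES_COUNT classification rules per
 * ramrod.
 */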
1992 void bnx2x_init_vlan_obj(struct bnx2x *bp,
1993 struct bnx2x_vlan_mac_obj *vlan_obj,
1994 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1995 dma_addr_t rdata_mapping, int state,
1996 unsigned long *pstate, bnx2x_obj_type type,
1997 struct bnx2x_credit_pool_obj *vlans_pool)
1999 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
2001 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2002 rdata_mapping, state, pstate, type, NULL,
2003 vlans_pool);
2005 vlan_obj->get_credit = bnx2x_get_credit_vlan;
2006 vlan_obj->put_credit = bnx2x_put_credit_vlan;
2007 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2008 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2010 if (CHIP_IS_E1x(bp)) {
2011 BNX2X_ERR("Do not support chips other than E2 and newer\n");
2012 BUG();
2013 } else {
2014 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
2015 vlan_obj->check_del = bnx2x_check_vlan_del;
2016 vlan_obj->check_add = bnx2x_check_vlan_add;
2017 vlan_obj->check_move = bnx2x_check_move;
2018 vlan_obj->ramrod_cmd =
2019 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2020 vlan_obj->get_n_elements = bnx2x_get_n_elements;
2022 /* Exe Queue */
2023 bnx2x_exe_queue_init(bp,
2024 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2025 qable_obj, bnx2x_validate_vlan_mac,
2026 bnx2x_remove_vlan_mac,
2027 bnx2x_optimize_vlan_mac,
2028 bnx2x_execute_vlan_mac,
2029 bnx2x_exeq_get_vlan);
2033 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2034 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2035 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2036 dma_addr_t rdata_mapping, int state,
2037 unsigned long *pstate, bnx2x_obj_type type,
2038 struct bnx2x_credit_pool_obj *macs_pool,
2039 struct bnx2x_credit_pool_obj *vlans_pool)
2041 union bnx2x_qable_obj *qable_obj =
2042 (union bnx2x_qable_obj *)vlan_mac_obj;
2044 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2045 rdata_mapping, state, pstate, type,
2046 macs_pool, vlans_pool);
2048 /* CAM pool handling */
2049 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2050 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2052 * CAM offset is relevant for 57710 and 57711 chips only, which have a
2053 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2054 * will be taken from MACs' pool object only.
2056 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2057 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2059 if (CHIP_IS_E1(bp)) {
2060 BNX2X_ERR("Do not support chips other than E1H and newer\n");
2061 BUG();
2062 } else if (CHIP_IS_E1H(bp)) {
2063 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
2064 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2065 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2066 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
2067 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2069 /* Exe Queue */
2070 bnx2x_exe_queue_init(bp,
2071 &vlan_mac_obj->exe_queue, 1, qable_obj,
2072 bnx2x_validate_vlan_mac,
2073 bnx2x_remove_vlan_mac,
2074 bnx2x_optimize_vlan_mac,
2075 bnx2x_execute_vlan_mac,
2076 bnx2x_exeq_get_vlan_mac);
2077 } else {
2078 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
2079 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2080 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2081 vlan_mac_obj->check_move = bnx2x_check_move;
2082 vlan_mac_obj->ramrod_cmd =
2083 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2085 /* Exe Queue */
2086 bnx2x_exe_queue_init(bp,
2087 &vlan_mac_obj->exe_queue,
2088 CLASSIFY_RULES_COUNT,
2089 qable_obj, bnx2x_validate_vlan_mac,
2090 bnx2x_remove_vlan_mac,
2091 bnx2x_optimize_vlan_mac,
2092 bnx2x_execute_vlan_mac,
2093 bnx2x_exeq_get_vlan_mac);
2098 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2099 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2100 struct tstorm_eth_mac_filter_config *mac_filters,
2101 u16 pf_id)
2103 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2105 u32 addr = BAR_TSTRORM_INTMEM +
2106 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2108 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2111 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2112 struct bnx2x_rx_mode_ramrod_params *p)
2114 /* update the bp MAC filter structure */
2115 u32 mask = (1 << p->cl_id);
2117 struct tstorm_eth_mac_filter_config *mac_filters =
2118 (struct tstorm_eth_mac_filter_config *)p->rdata;
2120 /* initial setting is drop-all */
2121 u8 drop_all_ucast = 1, drop_all_mcast = 1;
2122 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2123 u8 unmatched_unicast = 0;
2125 /* In e1x we only take the rx accept flags into account since tx switching
2126 * isn't enabled. */
2127 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2128 /* accept matched ucast */
2129 drop_all_ucast = 0;
2131 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2132 /* accept matched mcast */
2133 drop_all_mcast = 0;
2135 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2136 /* accept all ucast */
2137 drop_all_ucast = 0;
2138 accp_all_ucast = 1;
2140 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2141 /* accept all mcast */
2142 drop_all_mcast = 0;
2143 accp_all_mcast = 1;
2145 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2146 /* accept (all) bcast */
2147 accp_all_bcast = 1;
2148 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2149 /* accept unmatched unicasts */
2150 unmatched_unicast = 1;
2152 mac_filters->ucast_drop_all = drop_all_ucast ?
2153 mac_filters->ucast_drop_all | mask :
2154 mac_filters->ucast_drop_all & ~mask;
2156 mac_filters->mcast_drop_all = drop_all_mcast ?
2157 mac_filters->mcast_drop_all | mask :
2158 mac_filters->mcast_drop_all & ~mask;
2160 mac_filters->ucast_accept_all = accp_all_ucast ?
2161 mac_filters->ucast_accept_all | mask :
2162 mac_filters->ucast_accept_all & ~mask;
2164 mac_filters->mcast_accept_all = accp_all_mcast ?
2165 mac_filters->mcast_accept_all | mask :
2166 mac_filters->mcast_accept_all & ~mask;
2168 mac_filters->bcast_accept_all = accp_all_bcast ?
2169 mac_filters->bcast_accept_all | mask :
2170 mac_filters->bcast_accept_all & ~mask;
2172 mac_filters->unmatched_unicast = unmatched_unicast ?
2173 mac_filters->unmatched_unicast | mask :
2174 mac_filters->unmatched_unicast & ~mask;
2176 DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
2177 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2178 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2179 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2180 mac_filters->bcast_accept_all);
2182 /* write the MAC filter structure */
2183 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2185 /* The operation is completed */
2186 clear_bit(p->state, p->pstate);
2187 smp_mb__after_clear_bit();
2189 return 0;
2192 /* Setup ramrod data */
2193 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2194 struct eth_classify_header *hdr,
2195 u8 rule_cnt)
2197 hdr->echo = cpu_to_le32(cid);
2198 hdr->rule_cnt = rule_cnt;
2201 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2202 unsigned long *accept_flags,
2203 struct eth_filter_rules_cmd *cmd,
2204 bool clear_accept_all)
2206 u16 state;
2208 /* start with 'drop-all' */
2209 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2210 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2212 if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2213 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2215 if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2216 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2218 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2219 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2220 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2223 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2224 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2225 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2228 if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2229 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2231 if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2232 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2233 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2236 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2237 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2239 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2240 if (clear_accept_all) {
2241 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2242 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2243 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2244 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2247 cmd->state = cpu_to_le16(state);
2251 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2252 struct bnx2x_rx_mode_ramrod_params *p)
2254 struct eth_filter_rules_ramrod_data *data = p->rdata;
2255 int rc;
2256 u8 rule_idx = 0;
2258 /* Reset the ramrod data buffer */
2259 memset(data, 0, sizeof(*data));
2261 /* Setup ramrod data */
2263 /* Tx (internal switching) */
2264 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2265 data->rules[rule_idx].client_id = p->cl_id;
2266 data->rules[rule_idx].func_id = p->func_id;
2268 data->rules[rule_idx].cmd_general_data =
2269 ETH_FILTER_RULES_CMD_TX_CMD;
2271 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2272 &(data->rules[rule_idx++]),
2273 false);
2276 /* Rx */
2277 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2278 data->rules[rule_idx].client_id = p->cl_id;
2279 data->rules[rule_idx].func_id = p->func_id;
2281 data->rules[rule_idx].cmd_general_data =
2282 ETH_FILTER_RULES_CMD_RX_CMD;
2284 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2285 &(data->rules[rule_idx++]),
2286 false);
2291 * If FCoE Queue configuration has been requested, configure the Rx and
2292 * internal switching modes for this queue in separate rules.
2294 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2295 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2297 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2298 /* Tx (internal switching) */
2299 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2300 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2301 data->rules[rule_idx].func_id = p->func_id;
2303 data->rules[rule_idx].cmd_general_data =
2304 ETH_FILTER_RULES_CMD_TX_CMD;
2306 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2307 &(data->rules[rule_idx]),
2308 true);
2309 rule_idx++;
2312 /* Rx */
2313 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2314 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2315 data->rules[rule_idx].func_id = p->func_id;
2317 data->rules[rule_idx].cmd_general_data =
2318 ETH_FILTER_RULES_CMD_RX_CMD;
2320 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2321 &(data->rules[rule_idx]),
2322 true);
2323 rule_idx++;
2328 * Set the ramrod header (most importantly - number of rules to
2329 * configure).
2331 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2333 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2334 data->header.rule_cnt, p->rx_accept_flags,
2335 p->tx_accept_flags);
2338 * No need for an explicit memory barrier here: ordering the write to
2339 * the SPQ element against the update of the SPQ producer requires a
2340 * full memory barrier anyway, and bnx2x_sp_post() already contains
2341 * one.
2345 /* Send a ramrod */
2346 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2347 U64_HI(p->rdata_mapping),
2348 U64_LO(p->rdata_mapping),
2349 ETH_CONNECTION_TYPE);
2350 if (rc)
2351 return rc;
2353 /* Ramrod completion is pending */
2354 return 1;
2357 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2358 struct bnx2x_rx_mode_ramrod_params *p)
2360 return bnx2x_state_wait(bp, p->state, p->pstate);
2363 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2364 struct bnx2x_rx_mode_ramrod_params *p)
2366 /* Do nothing */
2367 return 0;
2370 int bnx2x_config_rx_mode(struct bnx2x *bp,
2371 struct bnx2x_rx_mode_ramrod_params *p)
2373 int rc;
2375 /* Configure the new classification in the chip */
2376 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2377 if (rc < 0)
2378 return rc;
2380 /* Wait for a ramrod completion if it was requested */
2381 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2382 rc = p->rx_mode_obj->wait_comp(bp, p);
2383 if (rc)
2384 return rc;
2387 return rc;
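/* Illustrative sketch (not compiled): requesting "normal" rx filtering
 * through bnx2x_config_rx_mode() above. Field and flag names follow their
 * use in this file; the surrounding setup (object init, rdata buffer) is
 * assumed to have been done by the caller.
 */
#if 0
static int example_rx_mode_normal(struct bnx2x *bp,
				  struct bnx2x_rx_mode_ramrod_params *p)
{
	__set_bit(RAMROD_RX, &p->ramrod_flags);
	__set_bit(RAMROD_TX, &p->ramrod_flags);
	__set_bit(RAMROD_COMP_WAIT, &p->ramrod_flags);

	/* accept matched ucast/mcast and all bcast, drop the rest */
	__set_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags);
	__set_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags);
	__set_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags);

	return bnx2x_config_rx_mode(bp, p);
}
#endif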
2390 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2391 struct bnx2x_rx_mode_obj *o)
2393 if (CHIP_IS_E1x(bp)) {
2394 o->wait_comp = bnx2x_empty_rx_mode_wait;
2395 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2396 } else {
2397 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2398 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2402 /********************* Multicast verbs: SET, CLEAR ****************************/
2403 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2405 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
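/* Illustrative sketch (not compiled): how a multicast MAC is mapped to one
 * of the 256 approximate-match bins. The top byte of the little-endian
 * CRC32C over the 6 MAC bytes selects the bin, spreading addresses evenly
 * over the 256-bit registry vector.
 */
#if 0
static void example_mcast_bin(void)
{
	u8 mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	u8 bin = bnx2x_mcast_bin_from_mac(mac);

	/* bin is in [0, 255]: on 57712 it indexes registry.aprox_match.vec,
	 * on 57711 the MC_HASH internal RAM.
	 */
	pr_info("MAC %pM hashes to bin %u\n", mac, bin);
}
#endif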
2408 struct bnx2x_mcast_mac_elem {
2409 struct list_head link;
2410 u8 mac[ETH_ALEN];
2411 u8 pad[2]; /* For a natural alignment of the following buffer */
2414 struct bnx2x_pending_mcast_cmd {
2415 struct list_head link;
2416 int type; /* BNX2X_MCAST_CMD_X */
2417 union {
2418 struct list_head macs_head;
2419 u32 macs_num; /* Needed for DEL command */
2420 int next_bin; /* Needed for RESTORE flow with approximate match */
2421 } data;
2423 bool done; /* set to true when the command has been handled. In
2424 * practice only the 57712 flow uses it, where one pending
2425 * command may be handled over several operations. On the
2426 * other chips every command completes in a single ramrod,
2427 * so there is no need for this field.
2431 static int bnx2x_mcast_wait(struct bnx2x *bp,
2432 struct bnx2x_mcast_obj *o)
2434 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2435 o->raw.wait_comp(bp, &o->raw))
2436 return -EBUSY;
2438 return 0;
2441 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2442 struct bnx2x_mcast_obj *o,
2443 struct bnx2x_mcast_ramrod_params *p,
2444 enum bnx2x_mcast_cmd cmd)
2446 int total_sz;
2447 struct bnx2x_pending_mcast_cmd *new_cmd;
2448 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2449 struct bnx2x_mcast_list_elem *pos;
2450 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2451 p->mcast_list_len : 0);
2453 /* If the command is empty ("handle pending commands only"), break */
2454 if (!p->mcast_list_len)
2455 return 0;
2457 total_sz = sizeof(*new_cmd) +
2458 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2460 /* Add mcast is called under a spinlock, thus allocate with GFP_ATOMIC */
2461 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2463 if (!new_cmd)
2464 return -ENOMEM;
2466 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2467 cmd, macs_list_len);
2469 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2471 new_cmd->type = cmd;
2472 new_cmd->done = false;
2474 switch (cmd) {
2475 case BNX2X_MCAST_CMD_ADD:
2476 cur_mac = (struct bnx2x_mcast_mac_elem *)
2477 ((u8 *)new_cmd + sizeof(*new_cmd));
2479 /* Push the MACs of the current command into the pending command
2480 * MACs list: FIFO
2482 list_for_each_entry(pos, &p->mcast_list, link) {
2483 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2484 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2485 cur_mac++;
2488 break;
2490 case BNX2X_MCAST_CMD_DEL:
2491 new_cmd->data.macs_num = p->mcast_list_len;
2492 break;
2494 case BNX2X_MCAST_CMD_RESTORE:
2495 new_cmd->data.next_bin = 0;
2496 break;
2498 default:
2499 kfree(new_cmd);
2500 BNX2X_ERR("Unknown command: %d\n", cmd);
2501 return -EINVAL;
2504 /* Push the new pending command to the tail of the pending list: FIFO */
2505 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2507 o->set_sched(o);
2509 return 1;
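/* Illustrative sketch (not compiled): the single-allocation layout built by
 * bnx2x_mcast_enqueue_cmd() above. The MAC elements live immediately after
 * the command header, so one kzalloc()/kfree() pair covers the whole
 * pending command; the struct below is hypothetical and only shows the
 * memory layout.
 */
#if 0
struct example_pending_cmd_layout {
	struct bnx2x_pending_mcast_cmd hdr;	/* link, type, data, done */
	struct bnx2x_mcast_mac_elem macs[];	/* macs_list_len entries,
						 * linked into hdr.data.macs_head
						 */
};
#endif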
2513 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2515 * @o: multicast object
2516 * @last: index to start looking from (inclusive)
2518 * returns the next found (set) bin or a negative value if none is found.
2520 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2522 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2524 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2525 if (o->registry.aprox_match.vec[i])
2526 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2527 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2528 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2529 vec, cur_bit)) {
2530 return cur_bit;
2533 inner_start = 0;
2536 /* None found */
2537 return -1;
2541 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2543 * @o: multicast object
2545 * returns the index of the found bin or -1 if none is found
2547 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2549 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2551 if (cur_bit >= 0)
2552 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2554 return cur_bit;
2557 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2559 struct bnx2x_raw_obj *raw = &o->raw;
2560 u8 rx_tx_flag = 0;
2562 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2563 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2564 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2566 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2567 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2568 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2570 return rx_tx_flag;
2573 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2574 struct bnx2x_mcast_obj *o, int idx,
2575 union bnx2x_mcast_config_data *cfg_data,
2576 enum bnx2x_mcast_cmd cmd)
2578 struct bnx2x_raw_obj *r = &o->raw;
2579 struct eth_multicast_rules_ramrod_data *data =
2580 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2581 u8 func_id = r->func_id;
2582 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2583 int bin;
2585 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2586 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2588 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2590 /* Get a bin and update the bins' vector */
2591 switch (cmd) {
2592 case BNX2X_MCAST_CMD_ADD:
2593 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2594 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2595 break;
2597 case BNX2X_MCAST_CMD_DEL:
2598 /* If there are no more bins to clear
2599 * (bnx2x_mcast_clear_first_bin() returns -1) then we "clear"
2600 * a nonexistent (0xff) bin.
2601 * See bnx2x_mcast_validate_e2() for an explanation of when
2602 * this may happen.
2604 bin = bnx2x_mcast_clear_first_bin(o);
2605 break;
2607 case BNX2X_MCAST_CMD_RESTORE:
2608 bin = cfg_data->bin;
2609 break;
2611 default:
2612 BNX2X_ERR("Unknown command: %d\n", cmd);
2613 return;
2616 DP(BNX2X_MSG_SP, "%s bin %d\n",
2617 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2618 "Setting" : "Clearing"), bin);
2620 data->rules[idx].bin_id = (u8)bin;
2621 data->rules[idx].func_id = func_id;
2622 data->rules[idx].engine_id = o->engine_id;
2626 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2628 * @bp: device handle
2629 * @o: multicast object
2630 * @start_bin: index in the registry to start from (including)
2631 * @rdata_idx: index in the ramrod data to start from
2633 * returns last handled bin index or -1 if all bins have been handled
2635 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2636 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2637 int *rdata_idx)
2639 int cur_bin, cnt = *rdata_idx;
2640 union bnx2x_mcast_config_data cfg_data = {NULL};
2642 /* go through the registry and configure the bins from it */
2643 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2644 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2646 cfg_data.bin = (u8)cur_bin;
2647 o->set_one_rule(bp, o, cnt, &cfg_data,
2648 BNX2X_MCAST_CMD_RESTORE);
2650 cnt++;
2652 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2654 /* Break if we reached the maximum number
2655 * of rules.
2657 if (cnt >= o->max_cmd_len)
2658 break;
2661 *rdata_idx = cnt;
2663 return cur_bin;
2666 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2667 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2668 int *line_idx)
2670 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2671 int cnt = *line_idx;
2672 union bnx2x_mcast_config_data cfg_data = {NULL};
2674 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2675 link) {
2677 cfg_data.mac = &pmac_pos->mac[0];
2678 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2680 cnt++;
2682 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2683 pmac_pos->mac);
2685 list_del(&pmac_pos->link);
2687 /* Break if we reached the maximum number
2688 * of rules.
2690 if (cnt >= o->max_cmd_len)
2691 break;
2694 *line_idx = cnt;
2696 /* if no more MACs to configure - we are done */
2697 if (list_empty(&cmd_pos->data.macs_head))
2698 cmd_pos->done = true;
2701 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2702 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2703 int *line_idx)
2705 int cnt = *line_idx;
2707 while (cmd_pos->data.macs_num) {
2708 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2710 cnt++;
2712 cmd_pos->data.macs_num--;
2714 DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
2715 cmd_pos->data.macs_num, cnt);
2717 /* Break if we reached the maximum
2718 * number of rules.
2720 if (cnt >= o->max_cmd_len)
2721 break;
2724 *line_idx = cnt;
2726 /* If there is nothing left to delete - we are done */
2727 if (!cmd_pos->data.macs_num)
2728 cmd_pos->done = true;
2731 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2732 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2733 int *line_idx)
2735 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2736 line_idx);
2738 if (cmd_pos->data.next_bin < 0)
2739 /* If o->set_restore returned -1 we are done */
2740 cmd_pos->done = true;
2741 else
2742 /* Start from the next bin next time */
2743 cmd_pos->data.next_bin++;
2746 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2747 struct bnx2x_mcast_ramrod_params *p)
2749 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2750 int cnt = 0;
2751 struct bnx2x_mcast_obj *o = p->mcast_obj;
2753 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2754 link) {
2755 switch (cmd_pos->type) {
2756 case BNX2X_MCAST_CMD_ADD:
2757 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2758 break;
2760 case BNX2X_MCAST_CMD_DEL:
2761 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2762 break;
2764 case BNX2X_MCAST_CMD_RESTORE:
2765 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2766 &cnt);
2767 break;
2769 default:
2770 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2771 return -EINVAL;
2774 /* If the command has been completed - remove it from the list
2775 * and free the memory
2777 if (cmd_pos->done) {
2778 list_del(&cmd_pos->link);
2779 kfree(cmd_pos);
2782 /* Break if we reached the maximum number of rules */
2783 if (cnt >= o->max_cmd_len)
2784 break;
2787 return cnt;
2790 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2791 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2792 int *line_idx)
2794 struct bnx2x_mcast_list_elem *mlist_pos;
2795 union bnx2x_mcast_config_data cfg_data = {NULL};
2796 int cnt = *line_idx;
2798 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2799 cfg_data.mac = mlist_pos->mac;
2800 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2802 cnt++;
2804 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2805 mlist_pos->mac);
2808 *line_idx = cnt;
2811 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2812 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2813 int *line_idx)
2815 int cnt = *line_idx, i;
2817 for (i = 0; i < p->mcast_list_len; i++) {
2818 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2820 cnt++;
2822 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2823 p->mcast_list_len - i - 1);
2826 *line_idx = cnt;
2830 * bnx2x_mcast_handle_current_cmd - send the current command if there is room
2832 * @bp: device handle
2833 * @p: ramrod multicast parameters
2834 * @cmd: command to handle
2835 * @start_cnt: first line in the ramrod data that may be used
2837 * This function is called iff there is enough room for the current command in
2838 * the ramrod data.
2839 * Returns the number of lines filled in the ramrod data in total.
2841 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2842 struct bnx2x_mcast_ramrod_params *p,
2843 enum bnx2x_mcast_cmd cmd,
2844 int start_cnt)
2846 struct bnx2x_mcast_obj *o = p->mcast_obj;
2847 int cnt = start_cnt;
2849 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2851 switch (cmd) {
2852 case BNX2X_MCAST_CMD_ADD:
2853 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2854 break;
2856 case BNX2X_MCAST_CMD_DEL:
2857 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2858 break;
2860 case BNX2X_MCAST_CMD_RESTORE:
2861 o->hdl_restore(bp, o, 0, &cnt);
2862 break;
2864 default:
2865 BNX2X_ERR("Unknown command: %d\n", cmd);
2866 return -EINVAL;
2869 /* The current command has been handled */
2870 p->mcast_list_len = 0;
2872 return cnt;
2875 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2876 struct bnx2x_mcast_ramrod_params *p,
2877 enum bnx2x_mcast_cmd cmd)
2879 struct bnx2x_mcast_obj *o = p->mcast_obj;
2880 int reg_sz = o->get_registry_size(o);
2882 switch (cmd) {
2883 /* DEL command deletes all currently configured MACs */
2884 case BNX2X_MCAST_CMD_DEL:
2885 o->set_registry_size(o, 0);
2886 /* Don't break */
2888 /* RESTORE command will restore the entire multicast configuration */
2889 case BNX2X_MCAST_CMD_RESTORE:
2890 /* Here we set the approximate amount of work to do, which in
2891 * fact may turn out to be less: some MACs in postponed ADD
2892 * command(s) scheduled before this command may fall into
2893 * the same bin, leaving the actual number of bins set in the
2894 * registry lower than estimated here. See
2895 * bnx2x_mcast_set_one_rule_e2() for further details.
2897 p->mcast_list_len = reg_sz;
2898 break;
2900 case BNX2X_MCAST_CMD_ADD:
2901 case BNX2X_MCAST_CMD_CONT:
2902 /* Here we assume that all new MACs will fall into new bins.
2903 * However we will correct the real registry size after we
2904 * handle all pending commands.
2906 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2907 break;
2909 default:
2910 BNX2X_ERR("Unknown command: %d\n", cmd);
2911 return -EINVAL;
2915 /* Increase the total number of MACs pending to be configured */
2916 o->total_pending_num += p->mcast_list_len;
2918 return 0;
2921 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2922 struct bnx2x_mcast_ramrod_params *p,
2923 int old_num_bins)
2925 struct bnx2x_mcast_obj *o = p->mcast_obj;
2927 o->set_registry_size(o, old_num_bins);
2928 o->total_pending_num -= p->mcast_list_len;
2932 * bnx2x_mcast_set_rdata_hdr_e2 - set the header values
2934 * @bp: device handle
2935 * @p: ramrod multicast parameters
2936 * @len: number of rules to handle
2938 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2939 struct bnx2x_mcast_ramrod_params *p,
2940 u8 len)
2942 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2943 struct eth_multicast_rules_ramrod_data *data =
2944 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2946 data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
2947 (BNX2X_FILTER_MCAST_PENDING <<
2948 BNX2X_SWCID_SHIFT));
2949 data->header.rule_cnt = len;
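/* Illustrative sketch (not compiled): the echo field set above packs the
 * software CID and the pending-filter state into one 32-bit word so that
 * the completion handler can recover both. The mask/shift constants come
 * from bnx2x.h; the decode simply mirrors the encode.
 */
#if 0
static void example_decode_echo(__le32 echo)
{
	u32 v = le32_to_cpu(echo);
	u32 cid = v & BNX2X_SWCID_MASK;
	u32 state = v >> BNX2X_SWCID_SHIFT;	/* BNX2X_FILTER_MCAST_PENDING */

	pr_info("echo: cid=0x%x state=%u\n", cid, state);
}
#endif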
2953 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2955 * @bp: device handle
2956 * @o: multicast object
2958 * Recalculate the actual number of set bins in the registry using Brian
2959 * Kernighan's algorithm: its execution complexity is proportional to the
2960 * number of set bins.
2961 * Returns 0 for compliance with bnx2x_mcast_refresh_registry_e1().
2963 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2964 struct bnx2x_mcast_obj *o)
2966 int i, cnt = 0;
2967 u64 elem;
2969 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2970 elem = o->registry.aprox_match.vec[i];
2971 for (; elem; cnt++)
2972 elem &= elem - 1;
2975 o->set_registry_size(o, cnt);
2977 return 0;
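/* Illustrative sketch (not compiled): the Kernighan bit-counting step used
 * above. Each "elem &= elem - 1" clears exactly the lowest set bit, so the
 * inner loop runs once per set bin rather than once per bit position.
 */
#if 0
static int example_popcount64(u64 elem)
{
	int cnt = 0;

	while (elem) {
		elem &= elem - 1;	/* clear the lowest set bit */
		cnt++;
	}
	return cnt;			/* e.g. 0xb0 -> 3 */
}
#endif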
2980 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2981 struct bnx2x_mcast_ramrod_params *p,
2982 enum bnx2x_mcast_cmd cmd)
2984 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2985 struct bnx2x_mcast_obj *o = p->mcast_obj;
2986 struct eth_multicast_rules_ramrod_data *data =
2987 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2988 int cnt = 0, rc;
2990 /* Reset the ramrod data buffer */
2991 memset(data, 0, sizeof(*data));
2993 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2995 /* If there are no more pending commands - clear SCHEDULED state */
2996 if (list_empty(&o->pending_cmds_head))
2997 o->clear_sched(o);
2999 /* The below may be true iff there was enough room in ramrod
3000 * data for all pending commands and for the current
3001 * command. Otherwise the current command would have been added
3002 * to the pending commands and p->mcast_list_len would have been
3003 * zeroed.
3005 if (p->mcast_list_len > 0)
3006 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
3008 /* We've pulled out some MACs - update the total number of
3009 * outstanding.
3011 o->total_pending_num -= cnt;
3013 /* send a ramrod */
3014 WARN_ON(o->total_pending_num < 0);
3015 WARN_ON(cnt > o->max_cmd_len);
3017 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
3019 /* Update the registry size if there are no more pending operations.
3021 * We don't want to change the value of the registry size if there are
3022 * pending operations because we want it to always be equal to the
3023 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
3024 * set bins after the last requested operation in order to properly
3025 * evaluate the size of the next DEL/RESTORE operation.
3027 * Note that we update the registry itself during command(s) handling
3028 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3029 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3030 * with a limited number of update commands (per MAC/bin) and we don't
3031 * know in this scope what the actual state of bins configuration is
3032 * going to be after this ramrod.
3034 if (!o->total_pending_num)
3035 bnx2x_mcast_refresh_registry_e2(bp, o);
3038 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3039 * RAMROD_PENDING status immediately.
3041 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3042 raw->clear_pending(raw);
3043 return 0;
3044 } else {
3046 * No need for an explicit memory barrier here: ordering the write to
3047 * the SPQ element against the update of the SPQ producer requires a
3048 * full memory barrier anyway, and bnx2x_sp_post() already contains
3049 * one.
3053 /* Send a ramrod */
3054 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3055 raw->cid, U64_HI(raw->rdata_mapping),
3056 U64_LO(raw->rdata_mapping),
3057 ETH_CONNECTION_TYPE);
3058 if (rc)
3059 return rc;
3061 /* Ramrod completion is pending */
3062 return 1;
3066 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3067 struct bnx2x_mcast_ramrod_params *p,
3068 enum bnx2x_mcast_cmd cmd)
3070 /* Mark that there is work to do */
3071 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3072 p->mcast_list_len = 1;
3074 return 0;
3077 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3078 struct bnx2x_mcast_ramrod_params *p,
3079 int old_num_bins)
3081 /* Do nothing */
3084 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3085 do { \
3086 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3087 } while (0)
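/* Illustrative sketch (not compiled): the macro above addresses a bin
 * inside an array of 32-bit words - (bit >> 5) selects the word and
 * (bit & 0x1f) the bit within it.
 */
#if 0
static void example_set_mc_filter_bit(u32 *mc_filter)
{
	BNX2X_57711_SET_MC_FILTER(mc_filter, 71);
	/* sets bit (71 & 0x1f) == 7 of word (71 >> 5) == 2 */
}
#endif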
3089 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3090 struct bnx2x_mcast_obj *o,
3091 struct bnx2x_mcast_ramrod_params *p,
3092 u32 *mc_filter)
3094 struct bnx2x_mcast_list_elem *mlist_pos;
3095 int bit;
3097 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3098 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3099 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3101 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3102 mlist_pos->mac, bit);
3104 /* bookkeeping... */
3105 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3106 bit);
3110 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3111 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3112 u32 *mc_filter)
3114 int bit;
3116 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3117 bit >= 0;
3118 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3119 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3120 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3124 /* On 57711 we write the multicast MACs' approximate match
3125 * table directly into the TSTORM's internal RAM, so no
3126 * special tricks are needed to make it work.
3128 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3129 struct bnx2x_mcast_ramrod_params *p,
3130 enum bnx2x_mcast_cmd cmd)
3132 int i;
3133 struct bnx2x_mcast_obj *o = p->mcast_obj;
3134 struct bnx2x_raw_obj *r = &o->raw;
3136 /* If CLEAR_ONLY has been requested, just clear the registry
3137 * and the pending bit; otherwise program the new filter.
3139 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3140 u32 mc_filter[MC_HASH_SIZE] = {0};
3142 /* Set the multicast filter bits before writing it into
3143 * the internal memory.
3145 switch (cmd) {
3146 case BNX2X_MCAST_CMD_ADD:
3147 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3148 break;
3150 case BNX2X_MCAST_CMD_DEL:
3151 DP(BNX2X_MSG_SP,
3152 "Invalidating multicast MACs configuration\n");
3154 /* clear the registry */
3155 memset(o->registry.aprox_match.vec, 0,
3156 sizeof(o->registry.aprox_match.vec));
3157 break;
3159 case BNX2X_MCAST_CMD_RESTORE:
3160 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3161 break;
3163 default:
3164 BNX2X_ERR("Unknown command: %d\n", cmd);
3165 return -EINVAL;
3168 /* Set the mcast filter in the internal memory */
3169 for (i = 0; i < MC_HASH_SIZE; i++)
3170 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3171 } else
3172 /* clear the registry */
3173 memset(o->registry.aprox_match.vec, 0,
3174 sizeof(o->registry.aprox_match.vec));
3176 /* We are done */
3177 r->clear_pending(r);
3179 return 0;
3182 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3183 struct bnx2x_mcast_ramrod_params *p,
3184 enum bnx2x_mcast_cmd cmd)
3186 struct bnx2x_mcast_obj *o = p->mcast_obj;
3187 int reg_sz = o->get_registry_size(o);
3189 switch (cmd) {
3190 /* DEL command deletes all currently configured MACs */
3191 case BNX2X_MCAST_CMD_DEL:
3192 o->set_registry_size(o, 0);
3193 /* Don't break */
3195 /* RESTORE command will restore the entire multicast configuration */
3196 case BNX2X_MCAST_CMD_RESTORE:
3197 p->mcast_list_len = reg_sz;
3198 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3199 cmd, p->mcast_list_len);
3200 break;
3202 case BNX2X_MCAST_CMD_ADD:
3203 case BNX2X_MCAST_CMD_CONT:
3204 /* Multicast MACs on 57710 are configured as unicast MACs and
3205 * only a limited number of CAM entries is available for
3206 * them.
3208 if (p->mcast_list_len > o->max_cmd_len) {
3209 BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3210 o->max_cmd_len);
3211 return -EINVAL;
3213 /* Every configured MAC should be cleared if the DEL command is
3214 * called. Only the last ADD command is relevant, as every ADD
3215 * command overrides the previous configuration.
3217 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3218 if (p->mcast_list_len > 0)
3219 o->set_registry_size(o, p->mcast_list_len);
3221 break;
3223 default:
3224 BNX2X_ERR("Unknown command: %d\n", cmd);
3225 return -EINVAL;
3229 /* We want to ensure that commands are executed one by one for 57710.
3230 * Therefore each non-empty command will consume o->max_cmd_len.
3232 if (p->mcast_list_len)
3233 o->total_pending_num += o->max_cmd_len;
3235 return 0;
3238 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3239 struct bnx2x_mcast_ramrod_params *p,
3240 int old_num_macs)
3242 struct bnx2x_mcast_obj *o = p->mcast_obj;
3244 o->set_registry_size(o, old_num_macs);
3246 /* If the current command hasn't been handled yet, reaching
3247 * this point means it is meant to be dropped and we have to
3248 * update the number of outstanding MACs accordingly.
3250 if (p->mcast_list_len)
3251 o->total_pending_num -= o->max_cmd_len;
3254 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3255 struct bnx2x_mcast_obj *o, int idx,
3256 union bnx2x_mcast_config_data *cfg_data,
3257 enum bnx2x_mcast_cmd cmd)
3259 struct bnx2x_raw_obj *r = &o->raw;
3260 struct mac_configuration_cmd *data =
3261 (struct mac_configuration_cmd *)(r->rdata);
3263 /* copy mac */
3264 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3265 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3266 &data->config_table[idx].middle_mac_addr,
3267 &data->config_table[idx].lsb_mac_addr,
3268 cfg_data->mac);
3270 data->config_table[idx].vlan_id = 0;
3271 data->config_table[idx].pf_id = r->func_id;
3272 data->config_table[idx].clients_bit_vector =
3273 cpu_to_le32(1 << r->cl_id);
3275 SET_FLAG(data->config_table[idx].flags,
3276 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3277 T_ETH_MAC_COMMAND_SET);
3282 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3284 * @bp: device handle
3285 * @p: ramrod multicast parameters
3286 * @len: number of rules to handle
3288 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3289 struct bnx2x_mcast_ramrod_params *p,
3290 u8 len)
3292 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3293 struct mac_configuration_cmd *data =
3294 (struct mac_configuration_cmd *)(r->rdata);
3296 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3297 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3298 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3300 data->hdr.offset = offset;
3301 data->hdr.client_id = cpu_to_le16(0xff);
3302 data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3303 (BNX2X_FILTER_MCAST_PENDING <<
3304 BNX2X_SWCID_SHIFT));
3305 data->hdr.length = len;
3309 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3311 * @bp: device handle
3312 * @o: multicast object
3313 * @start_idx: index in the registry to start from
3314 * @rdata_idx: index in the ramrod data to start from
3316 * The restore command for 57710 is, like all other commands, always a
3317 * standalone command, so start_idx and rdata_idx will always be 0. This
3318 * function always succeeds.
3319 * Returns -1 to comply with the 57712 variant.
3321 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3322 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3323 int *rdata_idx)
3325 struct bnx2x_mcast_mac_elem *elem;
3326 int i = 0;
3327 union bnx2x_mcast_config_data cfg_data = {NULL};
3329 /* go through the registry and configure the MACs from it. */
3330 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3331 cfg_data.mac = &elem->mac[0];
3332 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3334 i++;
3336 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3337 cfg_data.mac);
3340 *rdata_idx = i;
3342 return -1;
3346 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3347 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3349 struct bnx2x_pending_mcast_cmd *cmd_pos;
3350 struct bnx2x_mcast_mac_elem *pmac_pos;
3351 struct bnx2x_mcast_obj *o = p->mcast_obj;
3352 union bnx2x_mcast_config_data cfg_data = {NULL};
3353 int cnt = 0;
3356 /* If nothing to be done - return */
3357 if (list_empty(&o->pending_cmds_head))
3358 return 0;
3360 /* Handle the first command */
3361 cmd_pos = list_first_entry(&o->pending_cmds_head,
3362 struct bnx2x_pending_mcast_cmd, link);
3364 switch (cmd_pos->type) {
3365 case BNX2X_MCAST_CMD_ADD:
3366 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3367 cfg_data.mac = &pmac_pos->mac[0];
3368 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3370 cnt++;
3372 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3373 pmac_pos->mac);
3375 break;
3377 case BNX2X_MCAST_CMD_DEL:
3378 cnt = cmd_pos->data.macs_num;
3379 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3380 break;
3382 case BNX2X_MCAST_CMD_RESTORE:
3383 o->hdl_restore(bp, o, 0, &cnt);
3384 break;
3386 default:
3387 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3388 return -EINVAL;
3391 list_del(&cmd_pos->link);
3392 kfree(cmd_pos);
3394 return cnt;
3398 * bnx2x_get_fw_mac_addr - reverse bnx2x_set_fw_mac_addr().
3400 * @fw_hi: upper 16 bits of the MAC in FW format
3401 * @fw_mid: middle 16 bits of the MAC in FW format
3402 * @fw_lo: lower 16 bits of the MAC in FW format
3403 * @mac: buffer to store the MAC address in host format
3405 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3406 __le16 *fw_lo, u8 *mac)
3408 mac[1] = ((u8 *)fw_hi)[0];
3409 mac[0] = ((u8 *)fw_hi)[1];
3410 mac[3] = ((u8 *)fw_mid)[0];
3411 mac[2] = ((u8 *)fw_mid)[1];
3412 mac[5] = ((u8 *)fw_lo)[0];
3413 mac[4] = ((u8 *)fw_lo)[1];
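/* Illustrative sketch (not compiled): a round trip through the two
 * helpers. Assuming bnx2x_set_fw_mac_addr() (defined elsewhere in the
 * driver) packs mac[0..5] into the three 16-bit FW fields in the byte
 * order reversed above, the following recovers the address unchanged.
 */
#if 0
static void example_fw_mac_round_trip(const u8 *mac_in)
{
	__le16 hi, mid, lo;
	u8 mac_out[ETH_ALEN];

	bnx2x_set_fw_mac_addr(&hi, &mid, &lo, (u8 *)mac_in);
	bnx2x_get_fw_mac_addr(&hi, &mid, &lo, mac_out);

	WARN_ON(memcmp(mac_in, mac_out, ETH_ALEN));
}
#endif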
3417 * bnx2x_mcast_refresh_registry_e1 - update the exact match mcast registry
3419 * @bp: device handle
3420 * @o: multicast object
3422 * Check the first entry's flag in the ramrod data to see whether it was a
3423 * DELETE or an ADD command and update the registry accordingly: if ADD,
3424 * allocate memory and add the entries to the registry (list); if DELETE,
3425 * clear the registry and free the memory.
3427 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3428 struct bnx2x_mcast_obj *o)
3430 struct bnx2x_raw_obj *raw = &o->raw;
3431 struct bnx2x_mcast_mac_elem *elem;
3432 struct mac_configuration_cmd *data =
3433 (struct mac_configuration_cmd *)(raw->rdata);
3435 /* If the first entry contains a SET bit - the command was ADD;
3436 * otherwise it was DEL_ALL
3438 if (GET_FLAG(data->config_table[0].flags,
3439 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3440 int i, len = data->hdr.length;
3442 /* Break if it was a RESTORE command */
3443 if (!list_empty(&o->registry.exact_match.macs))
3444 return 0;
3446 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3447 if (!elem) {
3448 BNX2X_ERR("Failed to allocate registry memory\n");
3449 return -ENOMEM;
3452 for (i = 0; i < len; i++, elem++) {
3453 bnx2x_get_fw_mac_addr(
3454 &data->config_table[i].msb_mac_addr,
3455 &data->config_table[i].middle_mac_addr,
3456 &data->config_table[i].lsb_mac_addr,
3457 elem->mac);
3458 DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3459 elem->mac);
3460 list_add_tail(&elem->link,
3461 &o->registry.exact_match.macs);
3463 } else {
3464 elem = list_first_entry(&o->registry.exact_match.macs,
3465 struct bnx2x_mcast_mac_elem, link);
3466 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3467 kfree(elem);
3468 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3471 return 0;
3474 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3475 struct bnx2x_mcast_ramrod_params *p,
3476 enum bnx2x_mcast_cmd cmd)
3478 struct bnx2x_mcast_obj *o = p->mcast_obj;
3479 struct bnx2x_raw_obj *raw = &o->raw;
3480 struct mac_configuration_cmd *data =
3481 (struct mac_configuration_cmd *)(raw->rdata);
3482 int cnt = 0, i, rc;
3484 /* Reset the ramrod data buffer */
3485 memset(data, 0, sizeof(*data));
3487 /* First set all entries as invalid */
3488 for (i = 0; i < o->max_cmd_len ; i++)
3489 SET_FLAG(data->config_table[i].flags,
3490 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3491 T_ETH_MAC_COMMAND_INVALIDATE);
3493 /* Handle pending commands first */
3494 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3496 /* If there are no more pending commands - clear SCHEDULED state */
3497 if (list_empty(&o->pending_cmds_head))
3498 o->clear_sched(o);
3500 /* The below may be true iff there were no pending commands */
3501 if (!cnt)
3502 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3504 /* For 57710 every command has o->max_cmd_len length to ensure that
3505 * commands are done one at a time.
3507 o->total_pending_num -= o->max_cmd_len;
3509 /* send a ramrod */
3511 WARN_ON(cnt > o->max_cmd_len);
3513 /* Set ramrod header (in particular, a number of entries to update) */
3514 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3516 /* update the registry: we need the registry contents to always be up
3517 * to date in order to be able to execute a RESTORE opcode. Here
3518 * we use the fact that on 57710 we send one command at a time,
3519 * hence we may take the registry update out of the command handling
3520 * and do it in a simpler way here.
3522 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3523 if (rc)
3524 return rc;
3527 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3528 * RAMROD_PENDING status immediately.
3530 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3531 raw->clear_pending(raw);
3532 return 0;
3533 } else {
3535 * No need for an explicit memory barrier here: ordering the write to
3536 * the SPQ element against the update of the SPQ producer requires a
3537 * full memory barrier anyway, and bnx2x_sp_post() already contains
3538 * one.
3542 /* Send a ramrod */
3543 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3544 U64_HI(raw->rdata_mapping),
3545 U64_LO(raw->rdata_mapping),
3546 ETH_CONNECTION_TYPE);
3547 if (rc)
3548 return rc;
3550 /* Ramrod completion is pending */
3551 return 1;
3556 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3558 return o->registry.exact_match.num_macs_set;
3561 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3563 return o->registry.aprox_match.num_bins_set;
3566 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3567 int n)
3569 o->registry.exact_match.num_macs_set = n;
3572 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3573 int n)
3575 o->registry.aprox_match.num_bins_set = n;
3578 int bnx2x_config_mcast(struct bnx2x *bp,
3579 struct bnx2x_mcast_ramrod_params *p,
3580 enum bnx2x_mcast_cmd cmd)
3582 struct bnx2x_mcast_obj *o = p->mcast_obj;
3583 struct bnx2x_raw_obj *r = &o->raw;
3584 int rc = 0, old_reg_size;
3586 /* This is needed to recover the number of currently configured mcast
3587 * MACs in case of failure.
3589 old_reg_size = o->get_registry_size(o);
3591 /* Do some calculations and checks */
3592 rc = o->validate(bp, p, cmd);
3593 if (rc)
3594 return rc;
3596 /* Return if there is no work to do */
3597 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3598 return 0;
3600 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3601 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3603 /* Enqueue the current command to the pending list if we can't complete
3604 * it in the current iteration
3606 if (r->check_pending(r) ||
3607 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3608 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3609 if (rc < 0)
3610 goto error_exit1;
3612 /* Once the current command is on the pending command list, we
3613 * don't need to handle it separately.
3615 p->mcast_list_len = 0;
3618 if (!r->check_pending(r)) {
3620 /* Set 'pending' state */
3621 r->set_pending(r);
3623 /* Configure the new classification in the chip */
3624 rc = o->config_mcast(bp, p, cmd);
3625 if (rc < 0)
3626 goto error_exit2;
3628 /* Wait for a ramrod completion if it was requested */
3629 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3630 rc = o->wait_comp(bp, o);
3633 return rc;
3635 error_exit2:
3636 r->clear_pending(r);
3638 error_exit1:
3639 o->revert(bp, p, old_reg_size);
3641 return rc;
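/* Illustrative sketch (not compiled): a minimal ADD flow through
 * bnx2x_config_mcast() above. Field names follow their use in this file;
 * the helper itself and its arguments are hypothetical.
 */
#if 0
static int example_mcast_add(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
			     struct bnx2x_mcast_list_elem *elems, int n)
{
	struct bnx2x_mcast_ramrod_params p = {0};
	int i;

	p.mcast_obj = o;
	INIT_LIST_HEAD(&p.mcast_list);
	for (i = 0; i < n; i++)
		list_add_tail(&elems[i].link, &p.mcast_list);
	p.mcast_list_len = n;

	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);

	return bnx2x_config_mcast(bp, &p, BNX2X_MCAST_CMD_ADD);
}
#endif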
3644 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3646 smp_mb__before_clear_bit();
3647 clear_bit(o->sched_state, o->raw.pstate);
3648 smp_mb__after_clear_bit();
3651 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3653 smp_mb__before_clear_bit();
3654 set_bit(o->sched_state, o->raw.pstate);
3655 smp_mb__after_clear_bit();
3658 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3660 return !!test_bit(o->sched_state, o->raw.pstate);
3663 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3665 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3668 void bnx2x_init_mcast_obj(struct bnx2x *bp,
3669 struct bnx2x_mcast_obj *mcast_obj,
3670 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3671 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3672 int state, unsigned long *pstate, bnx2x_obj_type type)
3674 memset(mcast_obj, 0, sizeof(*mcast_obj));
3676 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3677 rdata, rdata_mapping, state, pstate, type);
3679 mcast_obj->engine_id = engine_id;
3681 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3683 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3684 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3685 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3686 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3688 if (CHIP_IS_E1(bp)) {
3689 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3690 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3691 mcast_obj->hdl_restore =
3692 bnx2x_mcast_handle_restore_cmd_e1;
3693 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3695 if (CHIP_REV_IS_SLOW(bp))
3696 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3697 else
3698 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3700 mcast_obj->wait_comp = bnx2x_mcast_wait;
3701 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3702 mcast_obj->validate = bnx2x_mcast_validate_e1;
3703 mcast_obj->revert = bnx2x_mcast_revert_e1;
3704 mcast_obj->get_registry_size =
3705 bnx2x_mcast_get_registry_size_exact;
3706 mcast_obj->set_registry_size =
3707 bnx2x_mcast_set_registry_size_exact;
3709 /* 57710 is the only chip that uses the exact match for mcast
3710 * at the moment.
3712 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3714 } else if (CHIP_IS_E1H(bp)) {
3715 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3716 mcast_obj->enqueue_cmd = NULL;
3717 mcast_obj->hdl_restore = NULL;
3718 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3720 /* 57711 doesn't send a ramrod, so it has unlimited credit
3721 * for one command.
3723 mcast_obj->max_cmd_len = -1;
3724 mcast_obj->wait_comp = bnx2x_mcast_wait;
3725 mcast_obj->set_one_rule = NULL;
3726 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3727 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3728 mcast_obj->get_registry_size =
3729 bnx2x_mcast_get_registry_size_aprox;
3730 mcast_obj->set_registry_size =
3731 bnx2x_mcast_set_registry_size_aprox;
3732 } else {
3733 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3734 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3735 mcast_obj->hdl_restore =
3736 bnx2x_mcast_handle_restore_cmd_e2;
3737 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3738 /* TODO: There should be a proper HSI define for this number!!!
3740 mcast_obj->max_cmd_len = 16;
3741 mcast_obj->wait_comp = bnx2x_mcast_wait;
3742 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3743 mcast_obj->validate = bnx2x_mcast_validate_e2;
3744 mcast_obj->revert = bnx2x_mcast_revert_e2;
3745 mcast_obj->get_registry_size =
3746 bnx2x_mcast_get_registry_size_aprox;
3747 mcast_obj->set_registry_size =
3748 bnx2x_mcast_set_registry_size_aprox;
3752 /*************************** Credit handling **********************************/
3755 * __atomic_add_ifless - add if the result is less than a given value.
3757 * @v: pointer of type atomic_t
3758 * @a: the amount to add to v...
3759 * @u: ...if (v + a) is less than u.
3761 * returns true if (v + a) was less than u, and false otherwise.
3764 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3766 int c, old;
3768 c = atomic_read(v);
3769 for (;;) {
3770 if (unlikely(c + a >= u))
3771 return false;
3773 old = atomic_cmpxchg((v), c, c + a);
3774 if (likely(old == c))
3775 break;
3776 c = old;
3779 return true;
3783 * __atomic_dec_ifmoe - decrement if the result is greater than or equal to a given value.
3785 * @v: pointer of type atomic_t
3786 * @a: the amount to subtract from v...
3787 * @u: ...if (v - a) is greater than or equal to u.
3789 * returns true if (v - a) was greater than or equal to u, and false
3790 * otherwise.
3792 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3794 int c, old;
3796 c = atomic_read(v);
3797 for (;;) {
3798 if (unlikely(c - a < u))
3799 return false;
3801 old = atomic_cmpxchg((v), c, c - a);
3802 if (likely(old == c))
3803 break;
3804 c = old;
3807 return true;
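/* Illustrative sketch (not compiled): using the two cmpxchg loops above as
 * a bounded counter. Both helpers retry until their cmpxchg wins or the
 * bound check fails, so concurrent callers can never drive the counter
 * below the floor or past the ceiling.
 */
#if 0
static void example_bounded_counter(void)
{
	atomic_t credit = ATOMIC_INIT(8);	/* pool of 8 credits */

	/* take 3 credits; fails (returns false) if fewer than 3 remain */
	if (__atomic_dec_ifmoe(&credit, 3, 0))
		pr_info("got credit, %d left\n", atomic_read(&credit));

	/* return 3 credits; fails if it would exceed the pool size of 8 */
	if (!__atomic_add_ifless(&credit, 3, 8 + 1))
		pr_warn("credit over-release detected\n");
}
#endif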
3810 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3812 bool rc;
3814 smp_mb();
3815 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3816 smp_mb();
3818 return rc;
3821 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3823 bool rc;
3825 smp_mb();
3827 /* Don't allow a refill if credit + cnt > pool_sz */
3828 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3830 smp_mb();
3832 return rc;
3835 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3837 int cur_credit;
3839 smp_mb();
3840 cur_credit = atomic_read(&o->credit);
3842 return cur_credit;
3845 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3846 int cnt)
3848 return true;
3852 static bool bnx2x_credit_pool_get_entry(
3853 struct bnx2x_credit_pool_obj *o,
3854 int *offset)
3856 int idx, vec, i;
3858 *offset = -1;
3860 /* Find "internal cam-offset" then add to base for this object... */
3861 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3863 /* Skip the current vector if there are no free entries in it */
3864 if (!o->pool_mirror[vec])
3865 continue;
3867 /* If we've got here we are going to find a free entry */
3868 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3869 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3871 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3872 /* Got one!! */
3873 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3874 *offset = o->base_pool_offset + idx;
3875 return true;
3879 return false;
3882 static bool bnx2x_credit_pool_put_entry(
3883 struct bnx2x_credit_pool_obj *o,
3884 int offset)
3886 if (offset < o->base_pool_offset)
3887 return false;
3889 offset -= o->base_pool_offset;
3891 if (offset >= o->pool_sz)
3892 return false;
3894 /* Return the entry to the pool */
3895 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3897 return true;
3900 static bool bnx2x_credit_pool_put_entry_always_true(
3901 struct bnx2x_credit_pool_obj *o,
3902 int offset)
3904 return true;
3907 static bool bnx2x_credit_pool_get_entry_always_true(
3908 struct bnx2x_credit_pool_obj *o,
3909 int *offset)
3911 *offset = -1;
3912 return true;
3915 * bnx2x_init_credit_pool - initialize credit pool internals.
3917 * @p: credit pool object
3918 * @base: Base entry in the CAM to use.
3919 * @credit: pool size.
3921 * If base is negative no CAM entries handling will be performed.
3922 * If credit is negative pool operations will always succeed (unlimited pool).
3925 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3926 int base, int credit)
3928 /* Zero the object first */
3929 memset(p, 0, sizeof(*p));
3931 /* Set the table to all 1s */
3932 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3934 /* Init a pool as full */
3935 atomic_set(&p->credit, credit);
3937 /* The total pool size */
3938 p->pool_sz = credit;
3940 p->base_pool_offset = base;
3942 /* Commit the change */
3943 smp_mb();
3945 p->check = bnx2x_credit_pool_check;
3947 /* if pool credit is negative - disable the checks */
3948 if (credit >= 0) {
3949 p->put = bnx2x_credit_pool_put;
3950 p->get = bnx2x_credit_pool_get;
3951 p->put_entry = bnx2x_credit_pool_put_entry;
3952 p->get_entry = bnx2x_credit_pool_get_entry;
3953 } else {
3954 p->put = bnx2x_credit_pool_always_true;
3955 p->get = bnx2x_credit_pool_always_true;
3956 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3957 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3960 /* If base is negative - disable entries handling */
3961 if (base < 0) {
3962 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3963 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
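/* Illustrative sketch (not compiled): the bnx2x_init_credit_pool() calling
 * conventions relied upon by the callers below.
 */
#if 0
static void example_pool_modes(struct bnx2x_credit_pool_obj *p)
{
	bnx2x_init_credit_pool(p, 32, 16);	/* 16 credits, CAM lines 32..47 */
	bnx2x_init_credit_pool(p, -1, 16);	/* 16 credits, no CAM entry handling */
	bnx2x_init_credit_pool(p, 0, -1);	/* unlimited: get/put always succeed */
	bnx2x_init_credit_pool(p, 0, 0);	/* empty pool: block all operations */
}
#endif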
3967 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3968 struct bnx2x_credit_pool_obj *p, u8 func_id,
3969 u8 func_num)
3971 /* TODO: this will be defined in consts as well... */
3972 #define BNX2X_CAM_SIZE_EMUL 5
3974 int cam_sz;
3976 if (CHIP_IS_E1(bp)) {
3977 /* In E1, multicast MACs are saved in the CAM... */
3978 if (!CHIP_REV_IS_SLOW(bp))
3979 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3980 else
3981 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3983 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3985 } else if (CHIP_IS_E1H(bp)) {
3986 /* CAM credit is equally divided among all active functions
3987 * on the PORT.
3989 if ((func_num > 0)) {
3990 if (!CHIP_REV_IS_SLOW(bp))
3991 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3992 else
3993 cam_sz = BNX2X_CAM_SIZE_EMUL;
3994 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3995 } else {
3996 /* this should never happen! Block MAC operations. */
3997 bnx2x_init_credit_pool(p, 0, 0);
4000 } else {
4003 * CAM credit is equally divided among all active functions
4004 * on the PATH.
4006 if ((func_num > 0)) {
4007 if (!CHIP_REV_IS_SLOW(bp))
4008 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
4009 else
4010 cam_sz = BNX2X_CAM_SIZE_EMUL;
4013 * No need for CAM entries handling for 57712 and
4014 * newer.
4016 bnx2x_init_credit_pool(p, -1, cam_sz);
4017 } else {
4018 /* this should never happen! Block MAC operations. */
4019 bnx2x_init_credit_pool(p, 0, 0);
4025 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4026 struct bnx2x_credit_pool_obj *p,
4027 u8 func_id,
4028 u8 func_num)
4030 if (CHIP_IS_E1x(bp)) {
4032 * There is no VLAN credit in HW on 57710 and 57711 only
4033 * MAC / MAC-VLAN can be set
4035 bnx2x_init_credit_pool(p, 0, -1);
4036 } else {
4038 * CAM credit is equally divided among all active functions
4039 * on the PATH.
4041 if (func_num > 0) {
4042 int credit = MAX_VLAN_CREDIT_E2 / func_num;
4043 bnx2x_init_credit_pool(p, func_id * credit, credit);
4044 } else
4045 /* this should never happen! Block VLAN operations. */
4046 bnx2x_init_credit_pool(p, 0, 0);
4050 /****************** RSS Configuration ******************/
4052 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4054 * @bp: driver handle
4055 * @p: pointer to rss configuration
4057 * Prints it when NETIF_MSG_IFUP debug level is configured.
4059 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4060 struct bnx2x_config_rss_params *p)
4062 int i;
4064 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4065 DP(BNX2X_MSG_SP, "0x0000: ");
4066 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4067 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4069 /* Print 4 bytes in a line */
4070 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4071 (((i + 1) & 0x3) == 0)) {
4072 DP_CONT(BNX2X_MSG_SP, "\n");
4073 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4077 DP_CONT(BNX2X_MSG_SP, "\n");
4081 * bnx2x_setup_rss - configure RSS
4083 * @bp: device handle
4084 * @p: rss configuration
4086 * Sends an UPDATE ramrod for that matter.
4088 static int bnx2x_setup_rss(struct bnx2x *bp,
4089 struct bnx2x_config_rss_params *p)
4091 struct bnx2x_rss_config_obj *o = p->rss_obj;
4092 struct bnx2x_raw_obj *r = &o->raw;
4093 struct eth_rss_update_ramrod_data *data =
4094 (struct eth_rss_update_ramrod_data *)(r->rdata);
4095 u8 rss_mode = 0;
4096 int rc;
4098 memset(data, 0, sizeof(*data));
4100 DP(BNX2X_MSG_SP, "Configuring RSS\n");
4102 /* Set an echo field */
4103 data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
4104 (r->state << BNX2X_SWCID_SHIFT));
4106 /* RSS mode */
4107 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4108 rss_mode = ETH_RSS_MODE_DISABLED;
4109 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4110 rss_mode = ETH_RSS_MODE_REGULAR;
4112 data->rss_mode = rss_mode;
4114 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4116 /* RSS capabilities */
4117 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4118 data->capabilities |=
4119 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4121 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4122 data->capabilities |=
4123 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4125 if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4126 data->capabilities |=
4127 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4129 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4130 data->capabilities |=
4131 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4133 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4134 data->capabilities |=
4135 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4137 if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4138 data->capabilities |=
4139 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4141 /* Hashing mask */
4142 data->rss_result_mask = p->rss_result_mask;
4144 /* RSS engine ID */
4145 data->rss_engine_id = o->engine_id;
4147 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4149 /* Indirection table */
4150 memcpy(data->indirection_table, p->ind_table,
4151 T_ETH_INDIRECTION_TABLE_SIZE);
4153 /* Remember the last configuration */
4154 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4156 /* Print the indirection table */
4157 if (netif_msg_ifup(bp))
4158 bnx2x_debug_print_ind_table(bp, p);
4160 /* RSS keys */
4161 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4162 memcpy(&data->rss_key[0], &p->rss_key[0],
4163 sizeof(data->rss_key));
4164 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4168 * No need for an explicit memory barrier here: we need to order
4169 * the write to the SPQ element against the update of the SPQ
4170 * producer, which involves a memory read, and a full memory
4171 * barrier is put there anyway
4172 * (inside bnx2x_sp_post()).
4175 /* Send a ramrod */
4176 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4177 U64_HI(r->rdata_mapping),
4178 U64_LO(r->rdata_mapping),
4179 ETH_CONNECTION_TYPE);
4181 if (rc < 0)
4182 return rc;
4184 return 1;
4187 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4188 u8 *ind_table)
4190 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4193 int bnx2x_config_rss(struct bnx2x *bp,
4194 struct bnx2x_config_rss_params *p)
4196 int rc;
4197 struct bnx2x_rss_config_obj *o = p->rss_obj;
4198 struct bnx2x_raw_obj *r = &o->raw;
4200 /* Do nothing if only driver cleanup was requested */
4201 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4202 return 0;
4204 r->set_pending(r);
4206 rc = o->config_rss(bp, p);
4207 if (rc < 0) {
4208 r->clear_pending(r);
4209 return rc;
4212 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4213 rc = r->wait_comp(bp, r);
4215 return rc;
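/* A minimal usage sketch (illustrative; assumes an initialized RSS
 * object such as bp->rss_conf_obj):
 *
 *	struct bnx2x_config_rss_params prms = {NULL};
 *
 *	prms.rss_obj = &bp->rss_conf_obj;
 *	__set_bit(BNX2X_RSS_MODE_REGULAR, &prms.rss_flags);
 *	__set_bit(BNX2X_RSS_IPV4, &prms.rss_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &prms.ramrod_flags);
 *	rc = bnx2x_config_rss(bp, &prms);
 */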
4219 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4220 struct bnx2x_rss_config_obj *rss_obj,
4221 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4222 void *rdata, dma_addr_t rdata_mapping,
4223 int state, unsigned long *pstate,
4224 bnx2x_obj_type type)
4226 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4227 rdata_mapping, state, pstate, type);
4229 rss_obj->engine_id = engine_id;
4230 rss_obj->config_rss = bnx2x_setup_rss;
4233 /********************** Queue state object ***********************************/
4236 * bnx2x_queue_state_change - perform Queue state change transition
4238 * @bp: device handle
4239 * @params: parameters to perform the transition
4241 * returns 0 in case of successfully completed transition, negative error
4242 * code in case of failure, positive (EBUSY) value if there is a completion
4243 * to that is still pending (possible only if RAMROD_COMP_WAIT is
4244 * not set in params->ramrod_flags for asynchronous commands).
4247 int bnx2x_queue_state_change(struct bnx2x *bp,
4248 struct bnx2x_queue_state_params *params)
4250 struct bnx2x_queue_sp_obj *o = params->q_obj;
4251 int rc, pending_bit;
4252 unsigned long *pending = &o->pending;
4254 /* Check that the requested transition is legal */
4255 rc = o->check_transition(bp, o, params);
4256 if (rc) {
4257 BNX2X_ERR("check transition returned an error. rc %d\n", rc);
4258 return -EINVAL;
4261 /* Set "pending" bit */
4262 DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
4263 pending_bit = o->set_pending(o, params);
4264 DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
4266 /* Don't send a command if only driver cleanup was requested */
4267 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4268 o->complete_cmd(bp, o, pending_bit);
4269 else {
4270 /* Send a ramrod */
4271 rc = o->send_cmd(bp, params);
4272 if (rc) {
4273 o->next_state = BNX2X_Q_STATE_MAX;
4274 clear_bit(pending_bit, pending);
4275 smp_mb__after_clear_bit();
4276 return rc;
4279 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4280 rc = o->wait_comp(bp, o, pending_bit);
4281 if (rc)
4282 return rc;
4284 return 0;
4288 return !!test_bit(pending_bit, pending);
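/* A minimal usage sketch (illustrative): requesting a synchronous HALT
 * on a queue object 'q_obj':
 *
 *	struct bnx2x_queue_state_params qparams = {NULL};
 *
 *	qparams.q_obj = q_obj;
 *	qparams.cmd = BNX2X_Q_CMD_HALT;
 *	__set_bit(RAMROD_COMP_WAIT, &qparams.ramrod_flags);
 *	rc = bnx2x_queue_state_change(bp, &qparams);
 */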
4292 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4293 struct bnx2x_queue_state_params *params)
4295 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4297 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4298 * the UPDATE command.
4300 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4301 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4302 bit = BNX2X_Q_CMD_UPDATE;
4303 else
4304 bit = cmd;
4306 set_bit(bit, &obj->pending);
4307 return bit;
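/* Example (follows from the mapping above): a BNX2X_Q_CMD_ACTIVATE
 * request sets the BNX2X_Q_CMD_UPDATE pending bit, and its completion
 * later clears that same bit, since activation is carried by an
 * UPDATE ramrod.
 */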
4310 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4311 struct bnx2x_queue_sp_obj *o,
4312 enum bnx2x_queue_cmd cmd)
4314 return bnx2x_state_wait(bp, cmd, &o->pending);
4318 * bnx2x_queue_comp_cmd - complete the state change command.
4320 * @bp: device handle
4321 * @o: queue state object
4322 * @cmd: command that completed
4324 * Checks that the arrived completion is expected.
4326 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4327 struct bnx2x_queue_sp_obj *o,
4328 enum bnx2x_queue_cmd cmd)
4330 unsigned long cur_pending = o->pending;
4332 if (!test_and_clear_bit(cmd, &cur_pending)) {
4333 BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4334 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
4335 o->state, cur_pending, o->next_state);
4336 return -EINVAL;
4339 if (o->next_tx_only >= o->max_cos)
4340 /* >= because tx-only must always be smaller than max_cos since
4341 * the primary connection supports COS 0
4343 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d\n",
4344 o->next_tx_only, o->max_cos);
4346 DP(BNX2X_MSG_SP,
4347 "Completing command %d for queue %d, setting state to %d\n",
4348 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4350 if (o->next_tx_only) /* print num tx-only if any exist */
4351 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4352 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4354 o->state = o->next_state;
4355 o->num_tx_only = o->next_tx_only;
4356 o->next_state = BNX2X_Q_STATE_MAX;
4358 /* It's important that o->state and o->next_state are
4359 * updated before o->pending.
4361 wmb();
4363 clear_bit(cmd, &o->pending);
4364 smp_mb__after_clear_bit();
4366 return 0;
4369 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4370 struct bnx2x_queue_state_params *cmd_params,
4371 struct client_init_ramrod_data *data)
4373 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4375 /* Rx data */
4377 /* IPv6 TPA supported for E2 and above only */
4378 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4379 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4382 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4383 struct bnx2x_queue_sp_obj *o,
4384 struct bnx2x_general_setup_params *params,
4385 struct client_init_general_data *gen_data,
4386 unsigned long *flags)
4388 gen_data->client_id = o->cl_id;
4390 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4391 gen_data->statistics_counter_id =
4392 params->stat_id;
4393 gen_data->statistics_en_flg = 1;
4394 gen_data->statistics_zero_flg =
4395 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4396 } else
4397 gen_data->statistics_counter_id =
4398 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4400 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4401 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4402 gen_data->sp_client_id = params->spcl_id;
4403 gen_data->mtu = cpu_to_le16(params->mtu);
4404 gen_data->func_id = o->func_id;
4407 gen_data->cos = params->cos;
4409 gen_data->traffic_type =
4410 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4411 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4413 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4414 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4417 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4418 struct bnx2x_txq_setup_params *params,
4419 struct client_init_tx_data *tx_data,
4420 unsigned long *flags)
4422 tx_data->enforce_security_flg =
4423 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4424 tx_data->default_vlan =
4425 cpu_to_le16(params->default_vlan);
4426 tx_data->default_vlan_flg =
4427 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4428 tx_data->tx_switching_flg =
4429 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4430 tx_data->anti_spoofing_flg =
4431 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4432 tx_data->force_default_pri_flg =
4433 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4435 tx_data->tunnel_non_lso_pcsum_location =
4436 test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
4437 PCSUM_ON_BD;
4439 tx_data->tx_status_block_id = params->fw_sb_id;
4440 tx_data->tx_sb_index_number = params->sb_cq_index;
4441 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4443 tx_data->tx_bd_page_base.lo =
4444 cpu_to_le32(U64_LO(params->dscr_map));
4445 tx_data->tx_bd_page_base.hi =
4446 cpu_to_le32(U64_HI(params->dscr_map));
4448 /* Don't configure any Tx switching mode during queue SETUP */
4449 tx_data->state = 0;
4452 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4453 struct rxq_pause_params *params,
4454 struct client_init_rx_data *rx_data)
4456 /* flow control data */
4457 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4458 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4459 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4460 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4461 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4462 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4463 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4466 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4467 struct bnx2x_rxq_setup_params *params,
4468 struct client_init_rx_data *rx_data,
4469 unsigned long *flags)
4471 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4472 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4473 rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4474 CLIENT_INIT_RX_DATA_TPA_MODE;
4475 rx_data->vmqueue_mode_en_flg = 0;
4477 rx_data->cache_line_alignment_log_size =
4478 params->cache_line_log;
4479 rx_data->enable_dynamic_hc =
4480 test_bit(BNX2X_Q_FLG_DHC, flags);
4481 rx_data->max_sges_for_packet = params->max_sges_pkt;
4482 rx_data->client_qzone_id = params->cl_qzone_id;
4483 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4485 /* Always start in DROP_ALL mode */
4486 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4487 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4489 /* We don't set drop flags */
4490 rx_data->drop_ip_cs_err_flg = 0;
4491 rx_data->drop_tcp_cs_err_flg = 0;
4492 rx_data->drop_ttl0_flg = 0;
4493 rx_data->drop_udp_cs_err_flg = 0;
4494 rx_data->inner_vlan_removal_enable_flg =
4495 test_bit(BNX2X_Q_FLG_VLAN, flags);
4496 rx_data->outer_vlan_removal_enable_flg =
4497 test_bit(BNX2X_Q_FLG_OV, flags);
4498 rx_data->status_block_id = params->fw_sb_id;
4499 rx_data->rx_sb_index_number = params->sb_cq_index;
4500 rx_data->max_tpa_queues = params->max_tpa_queues;
4501 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4502 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4503 rx_data->bd_page_base.lo =
4504 cpu_to_le32(U64_LO(params->dscr_map));
4505 rx_data->bd_page_base.hi =
4506 cpu_to_le32(U64_HI(params->dscr_map));
4507 rx_data->sge_page_base.lo =
4508 cpu_to_le32(U64_LO(params->sge_map));
4509 rx_data->sge_page_base.hi =
4510 cpu_to_le32(U64_HI(params->sge_map));
4511 rx_data->cqe_page_base.lo =
4512 cpu_to_le32(U64_LO(params->rcq_map));
4513 rx_data->cqe_page_base.hi =
4514 cpu_to_le32(U64_HI(params->rcq_map));
4515 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4517 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4518 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4519 rx_data->is_approx_mcast = 1;
4522 rx_data->rss_engine_id = params->rss_engine_id;
4524 /* silent vlan removal */
4525 rx_data->silent_vlan_removal_flg =
4526 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4527 rx_data->silent_vlan_value =
4528 cpu_to_le16(params->silent_removal_value);
4529 rx_data->silent_vlan_mask =
4530 cpu_to_le16(params->silent_removal_mask);
4534 /* initialize the general, tx and rx parts of a queue object */
4535 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4536 struct bnx2x_queue_state_params *cmd_params,
4537 struct client_init_ramrod_data *data)
4539 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4540 &cmd_params->params.setup.gen_params,
4541 &data->general,
4542 &cmd_params->params.setup.flags);
4544 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4545 &cmd_params->params.setup.txq_params,
4546 &data->tx,
4547 &cmd_params->params.setup.flags);
4549 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4550 &cmd_params->params.setup.rxq_params,
4551 &data->rx,
4552 &cmd_params->params.setup.flags);
4554 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4555 &cmd_params->params.setup.pause_params,
4556 &data->rx);
4559 /* initialize the general and tx parts of a tx-only queue object */
4560 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4561 struct bnx2x_queue_state_params *cmd_params,
4562 struct tx_queue_init_ramrod_data *data)
4564 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4565 &cmd_params->params.tx_only.gen_params,
4566 &data->general,
4567 &cmd_params->params.tx_only.flags);
4569 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4570 &cmd_params->params.tx_only.txq_params,
4571 &data->tx,
4572 &cmd_params->params.tx_only.flags);
4574 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",
4575 cmd_params->q_obj->cids[0],
4576 data->tx.tx_bd_page_base.lo,
4577 data->tx.tx_bd_page_base.hi);
4581 * bnx2x_q_init - init HW/FW queue
4583 * @bp: device handle
4584 * @params: queue init parameters
4586 * HW/FW initial Queue configuration:
4587 * - HC: Rx and Tx
4588 * - CDU context validation
4591 static inline int bnx2x_q_init(struct bnx2x *bp,
4592 struct bnx2x_queue_state_params *params)
4594 struct bnx2x_queue_sp_obj *o = params->q_obj;
4595 struct bnx2x_queue_init_params *init = &params->params.init;
4596 u16 hc_usec;
4597 u8 cos;
4599 /* Tx HC configuration */
4600 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4601 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4602 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4604 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4605 init->tx.sb_cq_index,
4606 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4607 hc_usec);
4610 /* Rx HC configuration */
4611 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4612 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4613 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4615 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4616 init->rx.sb_cq_index,
4617 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4618 hc_usec);
4621 /* Set CDU context validation values */
4622 for (cos = 0; cos < o->max_cos; cos++) {
4623 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4624 o->cids[cos], cos);
4625 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4626 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4629 /* As no ramrod is sent, complete the command immediately */
4630 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4632 mmiowb();
4633 smp_mb();
4635 return 0;
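/* Worked example (illustrative): hc_rate is a requested
 * interrupts-per-second target which the code above converts to a
 * microsecond coalescing timeout, e.g. hc_rate == 20000 gives
 * hc_usec = 1000000 / 20000 = 50, while hc_rate == 0 yields
 * hc_usec = 0.
 */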
4638 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4639 struct bnx2x_queue_state_params *params)
4641 struct bnx2x_queue_sp_obj *o = params->q_obj;
4642 struct client_init_ramrod_data *rdata =
4643 (struct client_init_ramrod_data *)o->rdata;
4644 dma_addr_t data_mapping = o->rdata_mapping;
4645 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4647 /* Clear the ramrod data */
4648 memset(rdata, 0, sizeof(*rdata));
4650 /* Fill the ramrod data */
4651 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4654 * No need for an explicit memory barrier here: we need to order
4655 * the write to the SPQ element against the update of the SPQ
4656 * producer, which involves a memory read, and a full memory
4657 * barrier is put there anyway
4658 * (inside bnx2x_sp_post()).
4661 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4662 U64_HI(data_mapping),
4663 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4666 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4667 struct bnx2x_queue_state_params *params)
4669 struct bnx2x_queue_sp_obj *o = params->q_obj;
4670 struct client_init_ramrod_data *rdata =
4671 (struct client_init_ramrod_data *)o->rdata;
4672 dma_addr_t data_mapping = o->rdata_mapping;
4673 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4675 /* Clear the ramrod data */
4676 memset(rdata, 0, sizeof(*rdata));
4678 /* Fill the ramrod data */
4679 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4680 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4683 * No need for an explicit memory barrier here: we need to order
4684 * the write to the SPQ element against the update of the SPQ
4685 * producer, which involves a memory read, and a full memory
4686 * barrier is put there anyway
4687 * (inside bnx2x_sp_post()).
4690 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4691 U64_HI(data_mapping),
4692 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4695 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4696 struct bnx2x_queue_state_params *params)
4698 struct bnx2x_queue_sp_obj *o = params->q_obj;
4699 struct tx_queue_init_ramrod_data *rdata =
4700 (struct tx_queue_init_ramrod_data *)o->rdata;
4701 dma_addr_t data_mapping = o->rdata_mapping;
4702 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4703 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4704 &params->params.tx_only;
4705 u8 cid_index = tx_only_params->cid_index;
4708 if (cid_index >= o->max_cos) {
4709 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4710 o->cl_id, cid_index);
4711 return -EINVAL;
4714 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4715 tx_only_params->gen_params.cos,
4716 tx_only_params->gen_params.spcl_id);
4718 /* Clear the ramrod data */
4719 memset(rdata, 0, sizeof(*rdata));
4721 /* Fill the ramrod data */
4722 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4724 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4725 o->cids[cid_index], rdata->general.client_id,
4726 rdata->general.sp_client_id, rdata->general.cos);
4729 * No need for an explicit memory barrier here: we need to order
4730 * the write to the SPQ element against the update of the SPQ
4731 * producer, which involves a memory read, and a full memory
4732 * barrier is put there anyway
4733 * (inside bnx2x_sp_post()).
4736 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4737 U64_HI(data_mapping),
4738 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4741 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4742 struct bnx2x_queue_sp_obj *obj,
4743 struct bnx2x_queue_update_params *params,
4744 struct client_update_ramrod_data *data)
4746 /* Client ID of the client to update */
4747 data->client_id = obj->cl_id;
4749 /* Function ID of the client to update */
4750 data->func_id = obj->func_id;
4752 /* Default VLAN value */
4753 data->default_vlan = cpu_to_le16(params->def_vlan);
4755 /* Inner VLAN stripping */
4756 data->inner_vlan_removal_enable_flg =
4757 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4758 data->inner_vlan_removal_change_flg =
4759 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4760 &params->update_flags);
4762 /* Outer VLAN stripping */
4763 data->outer_vlan_removal_enable_flg =
4764 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4765 data->outer_vlan_removal_change_flg =
4766 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4767 &params->update_flags);
4769 /* Drop packets whose source MAC does not belong to this
4770 * Queue.
4772 data->anti_spoofing_enable_flg =
4773 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4774 data->anti_spoofing_change_flg =
4775 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4777 /* Activate/Deactivate */
4778 data->activate_flg =
4779 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4780 data->activate_change_flg =
4781 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4783 /* Enable default VLAN */
4784 data->default_vlan_enable_flg =
4785 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4786 data->default_vlan_change_flg =
4787 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4788 &params->update_flags);
4790 /* silent vlan removal */
4791 data->silent_vlan_change_flg =
4792 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4793 &params->update_flags);
4794 data->silent_vlan_removal_flg =
4795 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4796 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4797 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
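/* Example (illustrative): every feature above is driven by a
 * value/change flag pair, so enabling anti-spoofing in an UPDATE
 * requires setting both bits before sending:
 *
 *	__set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
 *	__set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
 */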
4800 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4801 struct bnx2x_queue_state_params *params)
4803 struct bnx2x_queue_sp_obj *o = params->q_obj;
4804 struct client_update_ramrod_data *rdata =
4805 (struct client_update_ramrod_data *)o->rdata;
4806 dma_addr_t data_mapping = o->rdata_mapping;
4807 struct bnx2x_queue_update_params *update_params =
4808 &params->params.update;
4809 u8 cid_index = update_params->cid_index;
4811 if (cid_index >= o->max_cos) {
4812 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4813 o->cl_id, cid_index);
4814 return -EINVAL;
4818 /* Clear the ramrod data */
4819 memset(rdata, 0, sizeof(*rdata));
4821 /* Fill the ramrod data */
4822 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4825 * No need for an explicit memory barrier here: we need to order
4826 * the write to the SPQ element against the update of the SPQ
4827 * producer, which involves a memory read, and a full memory
4828 * barrier is put there anyway
4829 * (inside bnx2x_sp_post()).
4832 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4833 o->cids[cid_index], U64_HI(data_mapping),
4834 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4838 * bnx2x_q_send_deactivate - send DEACTIVATE command
4840 * @bp: device handle
4841 * @params: queue state parameters
4843 * implemented using the UPDATE command.
4845 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4846 struct bnx2x_queue_state_params *params)
4848 struct bnx2x_queue_update_params *update = &params->params.update;
4850 memset(update, 0, sizeof(*update));
4852 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4854 return bnx2x_q_send_update(bp, params);
4858 * bnx2x_q_send_activate - send ACTIVATE command
4860 * @bp: device handle
4861 * @params: queue state parameters
4863 * implemented using the UPDATE command.
4865 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4866 struct bnx2x_queue_state_params *params)
4868 struct bnx2x_queue_update_params *update = &params->params.update;
4870 memset(update, 0, sizeof(*update));
4872 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4873 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4875 return bnx2x_q_send_update(bp, params);
4878 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4879 struct bnx2x_queue_state_params *params)
4881 /* TODO: Not implemented yet. */
4882 return -1;
4885 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4886 struct bnx2x_queue_state_params *params)
4888 struct bnx2x_queue_sp_obj *o = params->q_obj;
4890 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4891 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4892 ETH_CONNECTION_TYPE);
4895 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4896 struct bnx2x_queue_state_params *params)
4898 struct bnx2x_queue_sp_obj *o = params->q_obj;
4899 u8 cid_idx = params->params.cfc_del.cid_index;
4901 if (cid_idx >= o->max_cos) {
4902 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4903 o->cl_id, cid_idx);
4904 return -EINVAL;
4907 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4908 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4911 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4912 struct bnx2x_queue_state_params *params)
4914 struct bnx2x_queue_sp_obj *o = params->q_obj;
4915 u8 cid_index = params->params.terminate.cid_index;
4917 if (cid_index >= o->max_cos) {
4918 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4919 o->cl_id, cid_index);
4920 return -EINVAL;
4923 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4924 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4927 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4928 struct bnx2x_queue_state_params *params)
4930 struct bnx2x_queue_sp_obj *o = params->q_obj;
4932 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4933 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4934 ETH_CONNECTION_TYPE);
4937 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4938 struct bnx2x_queue_state_params *params)
4940 switch (params->cmd) {
4941 case BNX2X_Q_CMD_INIT:
4942 return bnx2x_q_init(bp, params);
4943 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4944 return bnx2x_q_send_setup_tx_only(bp, params);
4945 case BNX2X_Q_CMD_DEACTIVATE:
4946 return bnx2x_q_send_deactivate(bp, params);
4947 case BNX2X_Q_CMD_ACTIVATE:
4948 return bnx2x_q_send_activate(bp, params);
4949 case BNX2X_Q_CMD_UPDATE:
4950 return bnx2x_q_send_update(bp, params);
4951 case BNX2X_Q_CMD_UPDATE_TPA:
4952 return bnx2x_q_send_update_tpa(bp, params);
4953 case BNX2X_Q_CMD_HALT:
4954 return bnx2x_q_send_halt(bp, params);
4955 case BNX2X_Q_CMD_CFC_DEL:
4956 return bnx2x_q_send_cfc_del(bp, params);
4957 case BNX2X_Q_CMD_TERMINATE:
4958 return bnx2x_q_send_terminate(bp, params);
4959 case BNX2X_Q_CMD_EMPTY:
4960 return bnx2x_q_send_empty(bp, params);
4961 default:
4962 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4963 return -EINVAL;
4967 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4968 struct bnx2x_queue_state_params *params)
4970 switch (params->cmd) {
4971 case BNX2X_Q_CMD_SETUP:
4972 return bnx2x_q_send_setup_e1x(bp, params);
4973 case BNX2X_Q_CMD_INIT:
4974 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4975 case BNX2X_Q_CMD_DEACTIVATE:
4976 case BNX2X_Q_CMD_ACTIVATE:
4977 case BNX2X_Q_CMD_UPDATE:
4978 case BNX2X_Q_CMD_UPDATE_TPA:
4979 case BNX2X_Q_CMD_HALT:
4980 case BNX2X_Q_CMD_CFC_DEL:
4981 case BNX2X_Q_CMD_TERMINATE:
4982 case BNX2X_Q_CMD_EMPTY:
4983 return bnx2x_queue_send_cmd_cmn(bp, params);
4984 default:
4985 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4986 return -EINVAL;
4990 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4991 struct bnx2x_queue_state_params *params)
4993 switch (params->cmd) {
4994 case BNX2X_Q_CMD_SETUP:
4995 return bnx2x_q_send_setup_e2(bp, params);
4996 case BNX2X_Q_CMD_INIT:
4997 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4998 case BNX2X_Q_CMD_DEACTIVATE:
4999 case BNX2X_Q_CMD_ACTIVATE:
5000 case BNX2X_Q_CMD_UPDATE:
5001 case BNX2X_Q_CMD_UPDATE_TPA:
5002 case BNX2X_Q_CMD_HALT:
5003 case BNX2X_Q_CMD_CFC_DEL:
5004 case BNX2X_Q_CMD_TERMINATE:
5005 case BNX2X_Q_CMD_EMPTY:
5006 return bnx2x_queue_send_cmd_cmn(bp, params);
5007 default:
5008 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5009 return -EINVAL;
5014 * bnx2x_queue_chk_transition - check state machine of a regular Queue
5016 * @bp: device handle
5017 * @o: queue state object
5018 * @params: state change request parameters
5020 * (i.e. not a Forwarding queue)
5021 * It both checks if the requested command is legal in a current
5022 * state and, if it's legal, sets a `next_state' in the object
5023 * that will be used in the completion flow to set the `state'
5024 * of the object.
5026 * returns 0 if a requested command is a legal transition,
5027 * -EINVAL otherwise.
5029 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5030 struct bnx2x_queue_sp_obj *o,
5031 struct bnx2x_queue_state_params *params)
5033 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5034 enum bnx2x_queue_cmd cmd = params->cmd;
5035 struct bnx2x_queue_update_params *update_params =
5036 &params->params.update;
5037 u8 next_tx_only = o->num_tx_only;
5040 * Forget all pending-for-completion commands if a driver-only state
5041 * transition has been requested.
5043 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5044 o->pending = 0;
5045 o->next_state = BNX2X_Q_STATE_MAX;
5049 * Don't allow a next state transition if we are in the middle of
5050 * the previous one.
5052 if (o->pending) {
5053 BNX2X_ERR("Blocking transition since pending was %lx\n",
5054 o->pending);
5055 return -EBUSY;
5058 switch (state) {
5059 case BNX2X_Q_STATE_RESET:
5060 if (cmd == BNX2X_Q_CMD_INIT)
5061 next_state = BNX2X_Q_STATE_INITIALIZED;
5063 break;
5064 case BNX2X_Q_STATE_INITIALIZED:
5065 if (cmd == BNX2X_Q_CMD_SETUP) {
5066 if (test_bit(BNX2X_Q_FLG_ACTIVE,
5067 &params->params.setup.flags))
5068 next_state = BNX2X_Q_STATE_ACTIVE;
5069 else
5070 next_state = BNX2X_Q_STATE_INACTIVE;
5073 break;
5074 case BNX2X_Q_STATE_ACTIVE:
5075 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5076 next_state = BNX2X_Q_STATE_INACTIVE;
5078 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5079 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5080 next_state = BNX2X_Q_STATE_ACTIVE;
5082 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5083 next_state = BNX2X_Q_STATE_MULTI_COS;
5084 next_tx_only = 1;
5087 else if (cmd == BNX2X_Q_CMD_HALT)
5088 next_state = BNX2X_Q_STATE_STOPPED;
5090 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5091 /* If "active" state change is requested, update the
5092 * state accordingly.
5094 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5095 &update_params->update_flags) &&
5096 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5097 &update_params->update_flags))
5098 next_state = BNX2X_Q_STATE_INACTIVE;
5099 else
5100 next_state = BNX2X_Q_STATE_ACTIVE;
5103 break;
5104 case BNX2X_Q_STATE_MULTI_COS:
5105 if (cmd == BNX2X_Q_CMD_TERMINATE)
5106 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5108 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5109 next_state = BNX2X_Q_STATE_MULTI_COS;
5110 next_tx_only = o->num_tx_only + 1;
5113 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5114 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5115 next_state = BNX2X_Q_STATE_MULTI_COS;
5117 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5118 /* If "active" state change is requested, update the
5119 * state accordingly.
5121 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5122 &update_params->update_flags) &&
5123 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5124 &update_params->update_flags))
5125 next_state = BNX2X_Q_STATE_INACTIVE;
5126 else
5127 next_state = BNX2X_Q_STATE_MULTI_COS;
5130 break;
5131 case BNX2X_Q_STATE_MCOS_TERMINATED:
5132 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5133 next_tx_only = o->num_tx_only - 1;
5134 if (next_tx_only == 0)
5135 next_state = BNX2X_Q_STATE_ACTIVE;
5136 else
5137 next_state = BNX2X_Q_STATE_MULTI_COS;
5140 break;
5141 case BNX2X_Q_STATE_INACTIVE:
5142 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5143 next_state = BNX2X_Q_STATE_ACTIVE;
5145 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5146 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5147 next_state = BNX2X_Q_STATE_INACTIVE;
5149 else if (cmd == BNX2X_Q_CMD_HALT)
5150 next_state = BNX2X_Q_STATE_STOPPED;
5152 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5153 /* If "active" state change is requested, update the
5154 * state accordingly.
5156 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5157 &update_params->update_flags) &&
5158 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5159 &update_params->update_flags)) {
5160 if (o->num_tx_only == 0)
5161 next_state = BNX2X_Q_STATE_ACTIVE;
5162 else /* tx only queues exist for this queue */
5163 next_state = BNX2X_Q_STATE_MULTI_COS;
5164 } else
5165 next_state = BNX2X_Q_STATE_INACTIVE;
5168 break;
5169 case BNX2X_Q_STATE_STOPPED:
5170 if (cmd == BNX2X_Q_CMD_TERMINATE)
5171 next_state = BNX2X_Q_STATE_TERMINATED;
5173 break;
5174 case BNX2X_Q_STATE_TERMINATED:
5175 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5176 next_state = BNX2X_Q_STATE_RESET;
5178 break;
5179 default:
5180 BNX2X_ERR("Illegal state: %d\n", state);
5183 /* Transition is assured */
5184 if (next_state != BNX2X_Q_STATE_MAX) {
5185 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5186 state, cmd, next_state);
5187 o->next_state = next_state;
5188 o->next_tx_only = next_tx_only;
5189 return 0;
5192 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5194 return -EINVAL;
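/* A typical legal command sequence under the state machine above
 * (illustrative):
 *
 *   RESET --INIT--> INITIALIZED --SETUP(ACTIVE)--> ACTIVE
 *         --HALT--> STOPPED --TERMINATE--> TERMINATED --CFC_DEL--> RESET
 */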
5197 void bnx2x_init_queue_obj(struct bnx2x *bp,
5198 struct bnx2x_queue_sp_obj *obj,
5199 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5200 void *rdata,
5201 dma_addr_t rdata_mapping, unsigned long type)
5203 memset(obj, 0, sizeof(*obj));
5205 /* We support at most BNX2X_MULTI_TX_COS Tx CoS at the moment */
5206 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5208 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5209 obj->max_cos = cid_cnt;
5210 obj->cl_id = cl_id;
5211 obj->func_id = func_id;
5212 obj->rdata = rdata;
5213 obj->rdata_mapping = rdata_mapping;
5214 obj->type = type;
5215 obj->next_state = BNX2X_Q_STATE_MAX;
5217 if (CHIP_IS_E1x(bp))
5218 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5219 else
5220 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5222 obj->check_transition = bnx2x_queue_chk_transition;
5224 obj->complete_cmd = bnx2x_queue_comp_cmd;
5225 obj->wait_comp = bnx2x_queue_wait_comp;
5226 obj->set_pending = bnx2x_queue_set_pending;
5229 /* Return a queue object's logical state */
5230 int bnx2x_get_q_logical_state(struct bnx2x *bp,
5231 struct bnx2x_queue_sp_obj *obj)
5233 switch (obj->state) {
5234 case BNX2X_Q_STATE_ACTIVE:
5235 case BNX2X_Q_STATE_MULTI_COS:
5236 return BNX2X_Q_LOGICAL_STATE_ACTIVE;
5237 case BNX2X_Q_STATE_RESET:
5238 case BNX2X_Q_STATE_INITIALIZED:
5239 case BNX2X_Q_STATE_MCOS_TERMINATED:
5240 case BNX2X_Q_STATE_INACTIVE:
5241 case BNX2X_Q_STATE_STOPPED:
5242 case BNX2X_Q_STATE_TERMINATED:
5243 case BNX2X_Q_STATE_FLRED:
5244 return BNX2X_Q_LOGICAL_STATE_STOPPED;
5245 default:
5246 return -EINVAL;
5250 /********************** Function state object *********************************/
5251 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5252 struct bnx2x_func_sp_obj *o)
5254 /* in the middle of a transaction - return INVALID state */
5255 if (o->pending)
5256 return BNX2X_F_STATE_MAX;
5259 * ensure the order of reading of o->pending and o->state:
5260 * o->pending should be read first
5262 rmb();
5264 return o->state;
5267 static int bnx2x_func_wait_comp(struct bnx2x *bp,
5268 struct bnx2x_func_sp_obj *o,
5269 enum bnx2x_func_cmd cmd)
5271 return bnx2x_state_wait(bp, cmd, &o->pending);
5275 * bnx2x_func_state_change_comp - complete the state machine transition
5277 * @bp: device handle
5278 * @o: function state object
5279 * @cmd: command that completed
5281 * Called on state change transition. Completes the state
5282 * machine transition only - no HW interaction.
5284 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5285 struct bnx2x_func_sp_obj *o,
5286 enum bnx2x_func_cmd cmd)
5288 unsigned long cur_pending = o->pending;
5290 if (!test_and_clear_bit(cmd, &cur_pending)) {
5291 BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5292 cmd, BP_FUNC(bp), o->state,
5293 cur_pending, o->next_state);
5294 return -EINVAL;
5297 DP(BNX2X_MSG_SP,
5298 "Completing command %d for func %d, setting state to %d\n",
5299 cmd, BP_FUNC(bp), o->next_state);
5301 o->state = o->next_state;
5302 o->next_state = BNX2X_F_STATE_MAX;
5304 /* It's important that o->state and o->next_state are
5305 * updated before o->pending.
5307 wmb();
5309 clear_bit(cmd, &o->pending);
5310 smp_mb__after_clear_bit();
5312 return 0;
5316 * bnx2x_func_comp_cmd - complete the state change command
5318 * @bp: device handle
5319 * @o: function state object
5320 * @cmd: command that completed
5322 * Checks that the arrived completion is expected.
5324 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5325 struct bnx2x_func_sp_obj *o,
5326 enum bnx2x_func_cmd cmd)
5328 /* Complete the state machine part first, check if it's a
5329 * legal completion.
5331 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5332 return rc;
5336 * bnx2x_func_chk_transition - perform function state machine transition
5338 * @bp: device handle
5339 * @o: function state object
5340 * @params: state change request parameters
5342 * It both checks if the requested command is legal in a current
5343 * state and, if it's legal, sets a `next_state' in the object
5344 * that will be used in the completion flow to set the `state'
5345 * of the object.
5347 * returns 0 if a requested command is a legal transition,
5348 * -EINVAL otherwise.
5350 static int bnx2x_func_chk_transition(struct bnx2x *bp,
5351 struct bnx2x_func_sp_obj *o,
5352 struct bnx2x_func_state_params *params)
5354 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5355 enum bnx2x_func_cmd cmd = params->cmd;
5358 * Forget all pending-for-completion commands if a driver-only state
5359 * transition has been requested.
5361 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5362 o->pending = 0;
5363 o->next_state = BNX2X_F_STATE_MAX;
5367 * Don't allow a next state transition if we are in the middle of
5368 * the previous one.
5370 if (o->pending)
5371 return -EBUSY;
5373 switch (state) {
5374 case BNX2X_F_STATE_RESET:
5375 if (cmd == BNX2X_F_CMD_HW_INIT)
5376 next_state = BNX2X_F_STATE_INITIALIZED;
5378 break;
5379 case BNX2X_F_STATE_INITIALIZED:
5380 if (cmd == BNX2X_F_CMD_START)
5381 next_state = BNX2X_F_STATE_STARTED;
5383 else if (cmd == BNX2X_F_CMD_HW_RESET)
5384 next_state = BNX2X_F_STATE_RESET;
5386 break;
5387 case BNX2X_F_STATE_STARTED:
5388 if (cmd == BNX2X_F_CMD_STOP)
5389 next_state = BNX2X_F_STATE_INITIALIZED;
5390 /* afex ramrods can be sent only in started mode, and only
5391 * if not pending for function_stop ramrod completion;
5392 * for these events the next state remains STARTED.
5394 else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5395 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5396 next_state = BNX2X_F_STATE_STARTED;
5398 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5399 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5400 next_state = BNX2X_F_STATE_STARTED;
5402 /* Switch_update ramrod can be sent in either started or
5403 * tx_stopped state, and it doesn't change the state.
5405 else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5406 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5407 next_state = BNX2X_F_STATE_STARTED;
5409 else if (cmd == BNX2X_F_CMD_TX_STOP)
5410 next_state = BNX2X_F_STATE_TX_STOPPED;
5412 break;
5413 case BNX2X_F_STATE_TX_STOPPED:
5414 if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5415 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5416 next_state = BNX2X_F_STATE_TX_STOPPED;
5418 else if (cmd == BNX2X_F_CMD_TX_START)
5419 next_state = BNX2X_F_STATE_STARTED;
5421 break;
5422 default:
5423 BNX2X_ERR("Unknown state: %d\n", state);
5426 /* Transition is assured */
5427 if (next_state != BNX2X_F_STATE_MAX) {
5428 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5429 state, cmd, next_state);
5430 o->next_state = next_state;
5431 return 0;
5434 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5435 state, cmd);
5437 return -EINVAL;
5441 * bnx2x_func_init_func - performs HW init at function stage
5443 * @bp: device handle
5444 * @drv: driver-specific HW init/reset operations
5446 * Init HW when the current phase is
5447 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize FUNCTION-only
5448 * HW blocks.
5450 static inline int bnx2x_func_init_func(struct bnx2x *bp,
5451 const struct bnx2x_func_sp_drv_ops *drv)
5453 return drv->init_hw_func(bp);
5457 * bnx2x_func_init_port - performs HW init at port stage
5459 * @bp: device handle
5460 * @drv: driver-specific HW init/reset operations
5462 * Init HW when the current phase is
5463 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5464 * FUNCTION-only HW blocks.
5467 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5468 const struct bnx2x_func_sp_drv_ops *drv)
5470 int rc = drv->init_hw_port(bp);
5471 if (rc)
5472 return rc;
5474 return bnx2x_func_init_func(bp, drv);
5478 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5480 * @bp: device handle
5481 * @drv: driver-specific HW init/reset operations
5483 * Init HW when the current phase is
5484 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5485 * PORT-only and FUNCTION-only HW blocks.
5487 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5488 const struct bnx2x_func_sp_drv_ops *drv)
5490 int rc = drv->init_hw_cmn_chip(bp);
5491 if (rc)
5492 return rc;
5494 return bnx2x_func_init_port(bp, drv);
5498 * bnx2x_func_init_cmn - performs HW init at common stage
5500 * @bp: device handle
5501 * @drv: driver-specific HW init/reset operations
5503 * Init HW when the current phase is
5504 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5505 * PORT-only and FUNCTION-only HW blocks.
5507 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5508 const struct bnx2x_func_sp_drv_ops *drv)
5510 int rc = drv->init_hw_cmn(bp);
5511 if (rc)
5512 return rc;
5514 return bnx2x_func_init_port(bp, drv);
5517 static int bnx2x_func_hw_init(struct bnx2x *bp,
5518 struct bnx2x_func_state_params *params)
5520 u32 load_code = params->params.hw_init.load_phase;
5521 struct bnx2x_func_sp_obj *o = params->f_obj;
5522 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5523 int rc = 0;
5525 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5526 BP_ABS_FUNC(bp), load_code);
5528 /* Prepare buffers for unzipping the FW */
5529 rc = drv->gunzip_init(bp);
5530 if (rc)
5531 return rc;
5533 /* Prepare FW */
5534 rc = drv->init_fw(bp);
5535 if (rc) {
5536 BNX2X_ERR("Error loading firmware\n");
5537 goto init_err;
5540 /* Handle the beginning of COMMON_XXX phases separately... */
5541 switch (load_code) {
5542 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5543 rc = bnx2x_func_init_cmn_chip(bp, drv);
5544 if (rc)
5545 goto init_err;
5547 break;
5548 case FW_MSG_CODE_DRV_LOAD_COMMON:
5549 rc = bnx2x_func_init_cmn(bp, drv);
5550 if (rc)
5551 goto init_err;
5553 break;
5554 case FW_MSG_CODE_DRV_LOAD_PORT:
5555 rc = bnx2x_func_init_port(bp, drv);
5556 if (rc)
5557 goto init_err;
5559 break;
5560 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5561 rc = bnx2x_func_init_func(bp, drv);
5562 if (rc)
5563 goto init_err;
5565 break;
5566 default:
5567 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5568 rc = -EINVAL;
5571 init_err:
5572 drv->gunzip_end(bp);
5574 /* In case of success, complete the command immediately: no ramrods
5575 * have been sent.
5577 if (!rc)
5578 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5580 return rc;
5584 * bnx2x_func_reset_func - reset HW at function stage
5586 * @bp: device handle
5587 * @drv: driver-specific HW init/reset operations
5589 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5590 * FUNCTION-only HW blocks.
5592 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5593 const struct bnx2x_func_sp_drv_ops *drv)
5595 drv->reset_hw_func(bp);
5599 * bnx2x_func_reset_port - reset HW at port stage
5601 * @bp: device handle
5602 * @drv: driver-specific HW init/reset operations
5604 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5605 * FUNCTION-only and PORT-only HW blocks.
5607 * !!!IMPORTANT!!!
5609 * It's important to call reset_port before reset_func(): the last thing
5610 * reset_func() does is pf_disable(), thus disabling PGLUE_B, which
5611 * makes any DMAE transactions impossible.
5613 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5614 const struct bnx2x_func_sp_drv_ops *drv)
5616 drv->reset_hw_port(bp);
5617 bnx2x_func_reset_func(bp, drv);
5621 * bnx2x_func_reset_cmn - reset HW at common stage
5623 * @bp: device handle
5624 * @drv: driver-specific HW init/reset operations
5626 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5627 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5628 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5630 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5631 const struct bnx2x_func_sp_drv_ops *drv)
5633 bnx2x_func_reset_port(bp, drv);
5634 drv->reset_hw_cmn(bp);
5638 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5639 struct bnx2x_func_state_params *params)
5641 u32 reset_phase = params->params.hw_reset.reset_phase;
5642 struct bnx2x_func_sp_obj *o = params->f_obj;
5643 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5645 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5646 reset_phase);
5648 switch (reset_phase) {
5649 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5650 bnx2x_func_reset_cmn(bp, drv);
5651 break;
5652 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5653 bnx2x_func_reset_port(bp, drv);
5654 break;
5655 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5656 bnx2x_func_reset_func(bp, drv);
5657 break;
5658 default:
5659 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5660 reset_phase);
5661 break;
5664 /* Complete the command immediately: no ramrods have been sent. */
5665 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5667 return 0;
5670 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5671 struct bnx2x_func_state_params *params)
5673 struct bnx2x_func_sp_obj *o = params->f_obj;
5674 struct function_start_data *rdata =
5675 (struct function_start_data *)o->rdata;
5676 dma_addr_t data_mapping = o->rdata_mapping;
5677 struct bnx2x_func_start_params *start_params = &params->params.start;
5679 memset(rdata, 0, sizeof(*rdata));
5681 /* Fill the ramrod data with provided parameters */
5682 rdata->function_mode = (u8)start_params->mf_mode;
5683 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5684 rdata->path_id = BP_PATH(bp);
5685 rdata->network_cos_mode = start_params->network_cos_mode;
5688 * No need for an explicit memory barrier here: we need to order
5689 * the write to the SPQ element against the update of the SPQ
5690 * producer, which involves a memory read, and a full memory
5691 * barrier is put there anyway
5692 * (inside bnx2x_sp_post()).
5695 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5696 U64_HI(data_mapping),
5697 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5700 static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5701 struct bnx2x_func_state_params *params)
5703 struct bnx2x_func_sp_obj *o = params->f_obj;
5704 struct function_update_data *rdata =
5705 (struct function_update_data *)o->rdata;
5706 dma_addr_t data_mapping = o->rdata_mapping;
5707 struct bnx2x_func_switch_update_params *switch_update_params =
5708 &params->params.switch_update;
5710 memset(rdata, 0, sizeof(*rdata));
5712 /* Fill the ramrod data with provided parameters */
5713 rdata->tx_switch_suspend_change_flg = 1;
5714 rdata->tx_switch_suspend = switch_update_params->suspend;
5715 rdata->echo = SWITCH_UPDATE;
5717 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5718 U64_HI(data_mapping),
5719 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5722 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5723 struct bnx2x_func_state_params *params)
5725 struct bnx2x_func_sp_obj *o = params->f_obj;
5726 struct function_update_data *rdata =
5727 (struct function_update_data *)o->afex_rdata;
5728 dma_addr_t data_mapping = o->afex_rdata_mapping;
5729 struct bnx2x_func_afex_update_params *afex_update_params =
5730 &params->params.afex_update;
5732 memset(rdata, 0, sizeof(*rdata));
5734 /* Fill the ramrod data with provided parameters */
5735 rdata->vif_id_change_flg = 1;
5736 rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5737 rdata->afex_default_vlan_change_flg = 1;
5738 rdata->afex_default_vlan =
5739 cpu_to_le16(afex_update_params->afex_default_vlan);
5740 rdata->allowed_priorities_change_flg = 1;
5741 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5742 rdata->echo = AFEX_UPDATE;
5744 /* No need for an explicit memory barrier here: we need to order
5745 * the write to the SPQ element against the update of the SPQ
5746 * producer, which involves a memory read, and a full memory
5747 * barrier is put there anyway
5748 * (inside bnx2x_sp_post()).
5750 DP(BNX2X_MSG_SP,
5751 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5752 rdata->vif_id,
5753 rdata->afex_default_vlan, rdata->allowed_priorities);
5755 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5756 U64_HI(data_mapping),
5757 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5760 static
5761 inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5762 struct bnx2x_func_state_params *params)
5764 struct bnx2x_func_sp_obj *o = params->f_obj;
5765 struct afex_vif_list_ramrod_data *rdata =
5766 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5767 struct bnx2x_func_afex_viflists_params *afex_vif_params =
5768 &params->params.afex_viflists;
5769 u64 *p_rdata = (u64 *)rdata;
5771 memset(rdata, 0, sizeof(*rdata));
5773 /* Fill the ramrod data with provided parameters */
5774 rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
5775 rdata->func_bit_map = afex_vif_params->func_bit_map;
5776 rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5777 rdata->func_to_clear = afex_vif_params->func_to_clear;
5779 /* send the sub-command type in the echo field */
5780 rdata->echo = afex_vif_params->afex_vif_list_command;
5782 /* No need for an explicit memory barrier here: we need to order
5783 * the write to the SPQ element against the update of the SPQ
5784 * producer, which involves a memory read, and a full memory
5785 * barrier is put there anyway
5786 * (inside bnx2x_sp_post()).
5789 DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5790 rdata->afex_vif_list_command, rdata->vif_list_index,
5791 rdata->func_bit_map, rdata->func_to_clear);
5793 /* this ramrod sends data directly and not through DMA mapping */
5794 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5795 U64_HI(*p_rdata), U64_LO(*p_rdata),
5796 NONE_CONNECTION_TYPE);
5799 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5800 struct bnx2x_func_state_params *params)
5802 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5803 NONE_CONNECTION_TYPE);
5806 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5807 struct bnx2x_func_state_params *params)
5809 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5810 NONE_CONNECTION_TYPE);
5812 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5813 struct bnx2x_func_state_params *params)
5815 struct bnx2x_func_sp_obj *o = params->f_obj;
5816 struct flow_control_configuration *rdata =
5817 (struct flow_control_configuration *)o->rdata;
5818 dma_addr_t data_mapping = o->rdata_mapping;
5819 struct bnx2x_func_tx_start_params *tx_start_params =
5820 &params->params.tx_start;
5821 int i;
5823 memset(rdata, 0, sizeof(*rdata));
5825 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5826 rdata->dcb_version = tx_start_params->dcb_version;
5827 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5829 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5830 rdata->traffic_type_to_priority_cos[i] =
5831 tx_start_params->traffic_type_to_priority_cos[i];
5833 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5834 U64_HI(data_mapping),
5835 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5838 static int bnx2x_func_send_cmd(struct bnx2x *bp,
5839 struct bnx2x_func_state_params *params)
5841 switch (params->cmd) {
5842 case BNX2X_F_CMD_HW_INIT:
5843 return bnx2x_func_hw_init(bp, params);
5844 case BNX2X_F_CMD_START:
5845 return bnx2x_func_send_start(bp, params);
5846 case BNX2X_F_CMD_STOP:
5847 return bnx2x_func_send_stop(bp, params);
5848 case BNX2X_F_CMD_HW_RESET:
5849 return bnx2x_func_hw_reset(bp, params);
5850 case BNX2X_F_CMD_AFEX_UPDATE:
5851 return bnx2x_func_send_afex_update(bp, params);
5852 case BNX2X_F_CMD_AFEX_VIFLISTS:
5853 return bnx2x_func_send_afex_viflists(bp, params);
5854 case BNX2X_F_CMD_TX_STOP:
5855 return bnx2x_func_send_tx_stop(bp, params);
5856 case BNX2X_F_CMD_TX_START:
5857 return bnx2x_func_send_tx_start(bp, params);
5858 case BNX2X_F_CMD_SWITCH_UPDATE:
5859 return bnx2x_func_send_switch_update(bp, params);
5860 default:
5861 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5862 return -EINVAL;
5866 void bnx2x_init_func_obj(struct bnx2x *bp,
5867 struct bnx2x_func_sp_obj *obj,
5868 void *rdata, dma_addr_t rdata_mapping,
5869 void *afex_rdata, dma_addr_t afex_rdata_mapping,
5870 struct bnx2x_func_sp_drv_ops *drv_iface)
5872 memset(obj, 0, sizeof(*obj));
5874 mutex_init(&obj->one_pending_mutex);
5876 obj->rdata = rdata;
5877 obj->rdata_mapping = rdata_mapping;
5878 obj->afex_rdata = afex_rdata;
5879 obj->afex_rdata_mapping = afex_rdata_mapping;
5880 obj->send_cmd = bnx2x_func_send_cmd;
5881 obj->check_transition = bnx2x_func_chk_transition;
5882 obj->complete_cmd = bnx2x_func_comp_cmd;
5883 obj->wait_comp = bnx2x_func_wait_comp;
5885 obj->drv = drv_iface;
5889 * bnx2x_func_state_change - perform Function state change transition
5891 * @bp: device handle
5892 * @params: parameters to perform the transition
5894 * returns 0 in case of successfully completed transition,
5895 * negative error code in case of failure, positive
5896 * (EBUSY) value if a completion is
5897 * still pending (possible only if RAMROD_COMP_WAIT is
5898 * not set in params->ramrod_flags for asynchronous
5899 * commands).
5901 int bnx2x_func_state_change(struct bnx2x *bp,
5902 struct bnx2x_func_state_params *params)
5904 struct bnx2x_func_sp_obj *o = params->f_obj;
5905 int rc, cnt = 300;
5906 enum bnx2x_func_cmd cmd = params->cmd;
5907 unsigned long *pending = &o->pending;
5909 mutex_lock(&o->one_pending_mutex);
5911 /* Check that the requested transition is legal */
5912 rc = o->check_transition(bp, o, params);
5913 if ((rc == -EBUSY) &&
5914 (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
5915 while ((rc == -EBUSY) && (--cnt > 0)) {
5916 mutex_unlock(&o->one_pending_mutex);
5917 msleep(10);
5918 mutex_lock(&o->one_pending_mutex);
5919 rc = o->check_transition(bp, o, params);
5921 if (rc == -EBUSY) {
5922 mutex_unlock(&o->one_pending_mutex);
5923 BNX2X_ERR("timeout waiting for previous ramrod completion\n");
5924 return rc;
5926 } else if (rc) {
5927 mutex_unlock(&o->one_pending_mutex);
5928 return rc;
5931 /* Set "pending" bit */
5932 set_bit(cmd, pending);
5934 /* Don't send a command if only driver cleanup was requested */
5935 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5936 bnx2x_func_state_change_comp(bp, o, cmd);
5937 mutex_unlock(&o->one_pending_mutex);
5938 } else {
5939 /* Send a ramrod */
5940 rc = o->send_cmd(bp, params);
5942 mutex_unlock(&o->one_pending_mutex);
5944 if (rc) {
5945 o->next_state = BNX2X_F_STATE_MAX;
5946 clear_bit(cmd, pending);
5947 smp_mb__after_clear_bit();
5948 return rc;
5951 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5952 rc = o->wait_comp(bp, o, cmd);
5953 if (rc)
5954 return rc;
5956 return 0;
5960 return !!test_bit(cmd, pending);
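/* A minimal usage sketch (illustrative): performing a synchronous
 * FUNCTION_START through the state machine above (start-specific
 * fields in fparams.params.start are omitted here):
 *
 *	struct bnx2x_func_state_params fparams = {NULL};
 *
 *	fparams.f_obj = &bp->func_obj;
 *	fparams.cmd = BNX2X_F_CMD_START;
 *	__set_bit(RAMROD_COMP_WAIT, &fparams.ramrod_flags);
 *	rc = bnx2x_func_state_change(bp, &fparams);
 */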