/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

static LIST_HEAD(cnic_dev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
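
/* One upper-layer-protocol (ULP) ops pointer per type (iSCSI, RDMA, L4).
 * Entries are published with rcu_assign_pointer() under cnic_lock and
 * read with rcu_dereference(), so fast-path readers never take the mutex.
 */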

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (cp->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	cp->uio_dev = iminor(inode);

	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	cnic_shutdown_rings(dev);

	cp->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}
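
/* The helpers below do not touch hardware directly; each bundles its
 * request into a drv_ctl_info and hands it to the owning bnx2/bnx2x
 * netdev driver through ethdev->drv_ctl(), which performs the actual
 * register or context access on the CNIC's behalf.
 */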

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = DRV_CTL_COMPLETION_CMD;
	info.data.comp.comp_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;

	if (cp->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	rcu_read_lock();
	ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
	if (ulp_ops)
		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
	rcu_read_unlock();
	return 0;
}

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk)) {
			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));
			if (is_valid_ether_addr(csk->ha))
				cnic_cm_set_pg(csk);
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

static void cnic_uio_stop(void)
{
	struct cnic_dev *dev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (cp->cnic_uinfo)
			cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	}
	read_unlock(&cnic_dev_lock);
}

int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	read_unlock(&cnic_dev_lock);

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl[ulp_type];
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_uio_stop();

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}
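
/* The ID table is a simple bitmap allocator: cnic_alloc_id() above
 * claims a specific ID, while cnic_alloc_new_id() below scans
 * round-robin from id_tbl->next so freed IDs are not immediately
 * reused.
 */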

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = dma->pg_map_arr[i] & 0xffffffff;
		page_table++;
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}
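
/* Each page-table entry is a 64-bit DMA address stored as two 32-bit
 * words: high word first for bnx2 firmware (big endian), low word
 * first for bnx2x (little endian).  cp->setup_pgtbl points at
 * whichever variant the chip needs and is invoked from
 * cnic_alloc_dma() below.
 */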

static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (cp->cnic_uinfo) {
		while (cp->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		uio_unregister_device(cp->cnic_uinfo);
		kfree(cp->cnic_uinfo);
		cp->cnic_uinfo = NULL;
	}

	if (cp->l2_buf) {
		dma_free_coherent(&dev->pcidev->dev, cp->l2_buf_size,
				  cp->l2_buf, cp->l2_buf_map);
		cp->l2_buf = NULL;
	}

	if (cp->l2_ring) {
		dma_free_coherent(&dev->pcidev->dev, cp->l2_ring_size,
				  cp->l2_ring, cp->l2_ring_map);
		cp->l2_ring = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq_info);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;

	cp->l2_ring_size = pages * BCM_PAGE_SIZE;
	cp->l2_ring = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_ring_size,
					 &cp->l2_ring_map,
					 GFP_KERNEL | __GFP_COMP);
	if (!cp->l2_ring)
		return -ENOMEM;

	cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
	cp->l2_buf = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_buf_size,
					&cp->l2_buf_map,
					GFP_KERNEL | __GFP_COMP);
	if (!cp->l2_buf)
		return -ENOMEM;

	return 0;
}
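
/* The L2 ring and buffer are allocated with __GFP_COMP because they
 * are exposed to user space via the UIO mappings registered in
 * cnic_alloc_uio() below (mem[2] and mem[3]).
 */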

static int cnic_alloc_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct uio_info *uinfo;
	int ret;

	uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
	if (!uinfo)
		return -ENOMEM;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
			PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(struct host_def_status_block);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
	uinfo->mem[2].size = cp->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
	uinfo->mem[3].size = cp->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	uinfo->priv = dev;

	ret = uio_register_device(&dev->pcidev->dev, uinfo);
	if (ret) {
		kfree(uinfo);
		return ret;
	}

	cp->cnic_uinfo = uinfo;

	return 0;
}
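
/* cnic_alloc_uio() exposes four memory regions to user space: the
 * device register window (mem[0]), the status block (mem[1]), and the
 * L2 ring and buffer (mem[2]/mem[3]).  A userspace iSCSI helper
 * (typically the iscsiuio daemon) mmaps these to run the L2 data path.
 */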

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_l2_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_alloc_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i, cid_space;

	if (BNX2X_ISCSI_START_CID < ethdev->starting_cid)
		return -EINVAL;

	cid_space = MAX_ISCSI_TBL_SZ +
		    (BNX2X_ISCSI_START_CID - ethdev->starting_cid);

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kzalloc(blks * sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (BNX2X_CHIP_IS_E1H(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      MAX_CNIC_L5_CONTEXT, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	pages = PAGE_ALIGN(MAX_CNIC_L5_CONTEXT * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 0);
	if (ret)
		goto error;
	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *)
			&cp->kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) cp->kcq_info.pg_map_arr[j] >> 32;
		next->addr_lo = cp->kcq_info.pg_map_arr[j] & 0xffffffff;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	memset(cp->status_blk.bnx2x, 0, sizeof(*cp->status_blk.bnx2x));

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_l2_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_alloc_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}
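
/* Each L5 context owns a small per-connection DMA buffer (kwqe_data,
 * carved out of kwq_16_data_info in cnic_alloc_bnx2x_resc()).  The
 * helper above returns its kernel address and fills l5_data with the
 * physical address that cnic_submit_kwqe_16() below passes to the
 * chip.
 */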

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cid, cp->func)));
	kwqe.hdr.type = cpu_to_le16(type);
	kwqe.hdr.reserved = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int func = cp->func, pages;
	int hq_bds;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(func),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(func),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(func),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(func),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(func),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(func),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(func),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(func),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(func),
		  hq_bds);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	int func = cp->func;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
	}
	cnic_free_id(&cp->cid_tbl, ctx->cid);
	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}

static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cid, cp->func);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;

	ictx->timers_context.flags |= ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;
}

static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp->ctx_tbl[l5_cid].cid,
						  cp->func);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return ret;
}

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
				  req->context_id, ISCSI_CONNECTION_TYPE,
				  &l5_data);
	return ret;
}

static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	union l5cm_specific_data l5_data;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!(ctx->ctx_flags & CTX_FL_OFFLD_START))
		goto skip_cfc_delete;

	while (!time_after(jiffies, ctx->timestamp + (2 * HZ)))
		msleep(250);

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
				  req->context_id,
				  ETH_CONNECTION_TYPE |
				  (1 << SPE_HDR_COMMON_RAMROD_SHIFT),
				  &l5_data);
	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}

static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}

static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int func = CNIC_FUNC(cp);
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(func), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(func), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(func), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(func), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(func), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(func), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 2,
		 mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 3,
		 mac[0]);
}

static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), tstorm_flags);
}

static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;

		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->func), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
				  kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		ctx->ctx_flags |= CTX_FL_OFFLD_START;

	return ret;
}

static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			ret = 0;
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0)
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);
		i += work;
	}
	return 0;
}

static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			cnic_kwq_completion(dev, 1);

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				cnic_kwq_completion(dev, 1);
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
				   kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
}

static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}

static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = *sw_prod;
	ri &= MAX_KCQ_IDX;

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = cp->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	*sw_prod = last;
	return last_cnt;
}
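
/* KCQEs with KCQE_FLAGS_NEXT set are continuation entries; the loop
 * above only commits sw_prod at the last entry of a complete group so
 * partial events are never handed to service_kcqes().
 */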

static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons = *cp->rx_cons_ptr;
	u16 tx_cons = *cp->tx_cons_ptr;

	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;
		if (cp->cnic_uinfo)
			uio_event_notify(cp->cnic_uinfo);
	}
}

static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct status_block *sblk = status_blk;
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = sblk->status_idx;
	u16 hw_prod, sw_prod;
	int kcqe_cnt;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return status_idx;

	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	hw_prod = sblk->status_completion_producer_index;
	sw_prod = cp->kcq_prod_idx;
	while (sw_prod != hw_prod) {
		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
		if (kcqe_cnt == 0)
			goto done;

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != sblk->status_idx) {
			status_idx = sblk->status_idx;
			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
			hw_prod = sblk->status_completion_producer_index;
		} else
			break;
	}

done:
	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);

	cp->kcq_prod_idx = sw_prod;

	cnic_chk_pkt_rings(cp);
	return status_idx;
}
static void cnic_service_bnx2_msix(unsigned long data)
{
        struct cnic_dev *dev = (struct cnic_dev *) data;
        struct cnic_local *cp = dev->cnic_priv;
        struct status_block_msix *status_blk = cp->status_blk.bnx2;
        u32 status_idx = status_blk->status_idx;
        u16 hw_prod, sw_prod;
        int kcqe_cnt;

        cp->kwq_con_idx = status_blk->status_cmd_consumer_index;

        hw_prod = status_blk->status_completion_producer_index;
        sw_prod = cp->kcq_prod_idx;
        while (sw_prod != hw_prod) {
                kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
                if (kcqe_cnt == 0)
                        goto done;

                service_kcqes(dev, kcqe_cnt);

                /* Tell compiler that status_blk fields can change. */
                barrier();
                if (status_idx != status_blk->status_idx) {
                        status_idx = status_blk->status_idx;
                        cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
                        hw_prod = status_blk->status_completion_producer_index;
                } else
                        break;
        }

done:
        CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
        cp->kcq_prod_idx = sw_prod;

        cnic_chk_pkt_rings(cp);

        cp->last_status_idx = status_idx;
        CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
        struct cnic_dev *dev = dev_instance;
        struct cnic_local *cp = dev->cnic_priv;
        u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;

        if (cp->ack_int)
                cp->ack_int(dev);

        prefetch(cp->status_blk.gen);
        prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

        if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
                tasklet_schedule(&cp->cnic_irq_task);

        return IRQ_HANDLED;
}
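/* Acknowledge a bnx2x status block index by writing an IGU ack
 * register through the per-port HC interrupt-ack command register.
 */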
static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
                                      u16 index, u8 op, u8 update)
{
        struct cnic_local *cp = dev->cnic_priv;
        u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}

static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;

        cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 0,
                           IGU_INT_DISABLE, 0);
}
static void cnic_service_bnx2x_bh(unsigned long data)
{
        struct cnic_dev *dev = (struct cnic_dev *) data;
        struct cnic_local *cp = dev->cnic_priv;
        u16 hw_prod, sw_prod;
        struct cstorm_status_block_c *sblk =
                &cp->status_blk.bnx2x->c_status_block;
        u32 status_idx = sblk->status_block_index;
        int kcqe_cnt;

        if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
                return;

        hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
        hw_prod = cp->hw_idx(hw_prod);
        sw_prod = cp->kcq_prod_idx;
        while (sw_prod != hw_prod) {
                kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
                if (kcqe_cnt == 0)
                        goto done;

                service_kcqes(dev, kcqe_cnt);

                /* Tell compiler that sblk fields can change. */
                barrier();
                if (status_idx == sblk->status_block_index)
                        break;

                status_idx = sblk->status_block_index;
                hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
                hw_prod = cp->hw_idx(hw_prod);
        }

done:
        CNIC_WR16(dev, cp->kcq_io_addr, sw_prod + MAX_KCQ_IDX);
        cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID,
                           status_idx, IGU_INT_ENABLE, 1);

        cp->kcq_prod_idx = sw_prod;
}

static int cnic_service_bnx2x(void *data, void *status_blk)
{
        struct cnic_dev *dev = data;
        struct cnic_local *cp = dev->cnic_priv;
        u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;

        if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
                prefetch(cp->status_blk.bnx2x);
                prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

                tasklet_schedule(&cp->cnic_irq_task);
                cnic_chk_pkt_rings(cp);
        }

        return 0;
}
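/* Tell every registered ULP that the device is going down.  The
 * ULP_F_CALL_PENDING bit is held across the cnic_stop() callback so
 * that a concurrent unregister can wait for the call to finish.
 */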
static void cnic_ulp_stop(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        int if_type;

        if (cp->cnic_uinfo)
                cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

        for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
                struct cnic_ulp_ops *ulp_ops;

                mutex_lock(&cnic_lock);
                ulp_ops = cp->ulp_ops[if_type];
                if (!ulp_ops) {
                        mutex_unlock(&cnic_lock);
                        continue;
                }
                set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
                mutex_unlock(&cnic_lock);

                if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
                        ulp_ops->cnic_stop(cp->ulp_handle[if_type]);

                clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
        }
}

static void cnic_ulp_start(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        int if_type;

        for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
                struct cnic_ulp_ops *ulp_ops;

                mutex_lock(&cnic_lock);
                ulp_ops = cp->ulp_ops[if_type];
                if (!ulp_ops || !ulp_ops->cnic_start) {
                        mutex_unlock(&cnic_lock);
                        continue;
                }
                set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
                mutex_unlock(&cnic_lock);

                if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
                        ulp_ops->cnic_start(cp->ulp_handle[if_type]);

                clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
        }
}
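/* Control entry point called by the ethernet driver to stop or
 * restart the CNIC side, or to report a ramrod completion.
 */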
static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
        struct cnic_dev *dev = data;

        switch (info->cmd) {
        case CNIC_CTL_STOP_CMD:
                cnic_hold(dev);

                cnic_ulp_stop(dev);
                cnic_stop_hw(dev);

                cnic_put(dev);
                break;
        case CNIC_CTL_START_CMD:
                cnic_hold(dev);

                if (!cnic_start_hw(dev))
                        cnic_ulp_start(dev);

                cnic_put(dev);
                break;
        case CNIC_CTL_COMPLETION_CMD: {
                u32 cid = BNX2X_SW_CID(info->data.comp.cid);
                u32 l5_cid;
                struct cnic_local *cp = dev->cnic_priv;

                if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
                        struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

                        ctx->wait_cond = 1;
                        wake_up(&ctx->waitq);
                }
                break;
        }
        default:
                return -EINVAL;
        }
        return 0;
}
static void cnic_ulp_init(struct cnic_dev *dev)
{
        int i;
        struct cnic_local *cp = dev->cnic_priv;

        for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
                struct cnic_ulp_ops *ulp_ops;

                mutex_lock(&cnic_lock);
                ulp_ops = cnic_ulp_tbl[i];
                if (!ulp_ops || !ulp_ops->cnic_init) {
                        mutex_unlock(&cnic_lock);
                        continue;
                }
                ulp_get(ulp_ops);
                mutex_unlock(&cnic_lock);

                if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
                        ulp_ops->cnic_init(dev);

                ulp_put(ulp_ops);
        }
}

static void cnic_ulp_exit(struct cnic_dev *dev)
{
        int i;
        struct cnic_local *cp = dev->cnic_priv;

        for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
                struct cnic_ulp_ops *ulp_ops;

                mutex_lock(&cnic_lock);
                ulp_ops = cnic_ulp_tbl[i];
                if (!ulp_ops || !ulp_ops->cnic_exit) {
                        mutex_unlock(&cnic_lock);
                        continue;
                }
                ulp_get(ulp_ops);
                mutex_unlock(&cnic_lock);

                if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
                        ulp_ops->cnic_exit(dev);

                ulp_put(ulp_ops);
        }
}
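/* Post an OFFLOAD_PG kwqe describing the L2 path of this socket:
 * destination and source MAC addresses and, if present, the VLAN tag.
 */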
static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
        struct cnic_dev *dev = csk->dev;
        struct l4_kwq_offload_pg *l4kwqe;
        struct kwqe *wqes[1];

        l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
        memset(l4kwqe, 0, sizeof(*l4kwqe));
        wqes[0] = (struct kwqe *) l4kwqe;

        l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
        l4kwqe->flags =
                L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
        l4kwqe->l2hdr_nbytes = ETH_HLEN;

        l4kwqe->da0 = csk->ha[0];
        l4kwqe->da1 = csk->ha[1];
        l4kwqe->da2 = csk->ha[2];
        l4kwqe->da3 = csk->ha[3];
        l4kwqe->da4 = csk->ha[4];
        l4kwqe->da5 = csk->ha[5];

        l4kwqe->sa0 = dev->mac_addr[0];
        l4kwqe->sa1 = dev->mac_addr[1];
        l4kwqe->sa2 = dev->mac_addr[2];
        l4kwqe->sa3 = dev->mac_addr[3];
        l4kwqe->sa4 = dev->mac_addr[4];
        l4kwqe->sa5 = dev->mac_addr[5];

        l4kwqe->etype = ETH_P_IP;
        l4kwqe->ipid_start = DEF_IPID_START;
        l4kwqe->host_opaque = csk->l5_cid;

        if (csk->vlan_id) {
                l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
                l4kwqe->vlan_tag = csk->vlan_id;
                l4kwqe->l2hdr_nbytes += 4;
        }

        return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_update_pg(struct cnic_sock *csk)
{
        struct cnic_dev *dev = csk->dev;
        struct l4_kwq_update_pg *l4kwqe;
        struct kwqe *wqes[1];

        l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
        memset(l4kwqe, 0, sizeof(*l4kwqe));
        wqes[0] = (struct kwqe *) l4kwqe;

        l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
        l4kwqe->flags =
                L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
        l4kwqe->pg_cid = csk->pg_cid;

        l4kwqe->da0 = csk->ha[0];
        l4kwqe->da1 = csk->ha[1];
        l4kwqe->da2 = csk->ha[2];
        l4kwqe->da3 = csk->ha[3];
        l4kwqe->da4 = csk->ha[4];
        l4kwqe->da5 = csk->ha[5];

        l4kwqe->pg_host_opaque = csk->l5_cid;
        l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

        return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
        struct cnic_dev *dev = csk->dev;
        struct l4_kwq_upload *l4kwqe;
        struct kwqe *wqes[1];

        l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
        memset(l4kwqe, 0, sizeof(*l4kwqe));
        wqes[0] = (struct kwqe *) l4kwqe;

        l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
        l4kwqe->flags =
                L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
        l4kwqe->cid = csk->pg_cid;

        return dev->submit_kwqes(dev, wqes, 1);
}
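/* Build and post the TCP connect request.  CONNECT1 and CONNECT3 are
 * always sent; CONNECT2 carrying the upper IPv6 address words is
 * inserted between them for IPv6 connections.
 */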
static int cnic_cm_conn_req(struct cnic_sock *csk)
{
        struct cnic_dev *dev = csk->dev;
        struct l4_kwq_connect_req1 *l4kwqe1;
        struct l4_kwq_connect_req2 *l4kwqe2;
        struct l4_kwq_connect_req3 *l4kwqe3;
        struct kwqe *wqes[3];
        u8 tcp_flags = 0;
        int num_wqes = 2;

        l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
        l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
        l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
        memset(l4kwqe1, 0, sizeof(*l4kwqe1));
        memset(l4kwqe2, 0, sizeof(*l4kwqe2));
        memset(l4kwqe3, 0, sizeof(*l4kwqe3));

        l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
        l4kwqe3->flags =
                L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
        l4kwqe3->ka_timeout = csk->ka_timeout;
        l4kwqe3->ka_interval = csk->ka_interval;
        l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
        l4kwqe3->tos = csk->tos;
        l4kwqe3->ttl = csk->ttl;
        l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
        l4kwqe3->pmtu = csk->mtu;
        l4kwqe3->rcv_buf = csk->rcv_buf;
        l4kwqe3->snd_buf = csk->snd_buf;
        l4kwqe3->seed = csk->seed;

        wqes[0] = (struct kwqe *) l4kwqe1;
        if (test_bit(SK_F_IPV6, &csk->flags)) {
                wqes[1] = (struct kwqe *) l4kwqe2;
                wqes[2] = (struct kwqe *) l4kwqe3;
                num_wqes = 3;

                l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
                l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
                l4kwqe2->flags =
                        L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
                        L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
                l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
                l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
                l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
                l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
                l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
                l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
                l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
                               sizeof(struct tcphdr);
        } else {
                wqes[1] = (struct kwqe *) l4kwqe3;
                l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
                               sizeof(struct tcphdr);
        }

        l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
        l4kwqe1->flags =
                (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
                 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
        l4kwqe1->cid = csk->cid;
        l4kwqe1->pg_cid = csk->pg_cid;
        l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
        l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
        l4kwqe1->src_port = be16_to_cpu(csk->src_port);
        l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
        if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
                tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
        if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
                tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
        if (csk->tcp_flags & SK_TCP_NAGLE)
                tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
        if (csk->tcp_flags & SK_TCP_TIMESTAMP)
                tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
        if (csk->tcp_flags & SK_TCP_SACK)
                tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
        if (csk->tcp_flags & SK_TCP_SEG_SCALING)
                tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

        l4kwqe1->tcp_flags = tcp_flags;

        return dev->submit_kwqes(dev, wqes, num_wqes);
}
static int cnic_cm_close_req(struct cnic_sock *csk)
{
        struct cnic_dev *dev = csk->dev;
        struct l4_kwq_close_req *l4kwqe;
        struct kwqe *wqes[1];

        l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
        memset(l4kwqe, 0, sizeof(*l4kwqe));
        wqes[0] = (struct kwqe *) l4kwqe;

        l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
        l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
        l4kwqe->cid = csk->cid;

        return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_abort_req(struct cnic_sock *csk)
{
        struct cnic_dev *dev = csk->dev;
        struct l4_kwq_reset_req *l4kwqe;
        struct kwqe *wqes[1];

        l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
        memset(l4kwqe, 0, sizeof(*l4kwqe));
        wqes[0] = (struct kwqe *) l4kwqe;

        l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
        l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
        l4kwqe->cid = csk->cid;

        return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
                          u32 l5_cid, struct cnic_sock **csk, void *context)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_sock *csk1;

        if (l5_cid >= MAX_CM_SK_TBL_SZ)
                return -EINVAL;

        csk1 = &cp->csk_tbl[l5_cid];
        if (atomic_read(&csk1->ref_count))
                return -EAGAIN;

        if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
                return -EBUSY;

        csk1->dev = dev;
        csk1->cid = cid;
        csk1->l5_cid = l5_cid;
        csk1->ulp_type = ulp_type;
        csk1->context = context;

        csk1->ka_timeout = DEF_KA_TIMEOUT;
        csk1->ka_interval = DEF_KA_INTERVAL;
        csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
        csk1->tos = DEF_TOS;
        csk1->ttl = DEF_TTL;
        csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
        csk1->rcv_buf = DEF_RCV_BUF;
        csk1->snd_buf = DEF_SND_BUF;
        csk1->seed = DEF_SEED;

        *csk = csk1;
        return 0;
}

static void cnic_cm_cleanup(struct cnic_sock *csk)
{
        if (csk->src_port) {
                struct cnic_dev *dev = csk->dev;
                struct cnic_local *cp = dev->cnic_priv;

                cnic_free_id(&cp->csk_port_tbl, csk->src_port);
                csk->src_port = 0;
        }
}

static void cnic_close_conn(struct cnic_sock *csk)
{
        if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
                cnic_cm_upload_pg(csk);
                clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
        }
        cnic_cm_cleanup(csk);
}

static int cnic_cm_destroy(struct cnic_sock *csk)
{
        if (!cnic_in_use(csk))
                return -EINVAL;

        csk_hold(csk);
        clear_bit(SK_F_INUSE, &csk->flags);
        smp_mb__after_clear_bit();
        while (atomic_read(&csk->ref_count) != 1)
                msleep(1);
        cnic_cm_cleanup(csk);

        csk->flags = 0;
        csk_put(csk);
        return 0;
}
static inline u16 cnic_get_vlan(struct net_device *dev,
                                struct net_device **vlan_dev)
{
        if (dev->priv_flags & IFF_802_1Q_VLAN) {
                *vlan_dev = vlan_dev_real_dev(dev);
                return vlan_dev_vlan_id(dev);
        }
        *vlan_dev = dev;
        return 0;
}

static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
                             struct dst_entry **dst)
{
#if defined(CONFIG_INET)
        struct flowi fl;
        int err;
        struct rtable *rt;

        memset(&fl, 0, sizeof(fl));
        fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;

        err = ip_route_output_key(&init_net, &rt, &fl);
        if (!err)
                *dst = &rt->u.dst;
        return err;
#else
        return -ENETUNREACH;
#endif
}

static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
                             struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
        struct flowi fl;

        memset(&fl, 0, sizeof(fl));
        ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
        if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
                fl.oif = dst_addr->sin6_scope_id;

        *dst = ip6_route_output(&init_net, NULL, &fl);
        if (*dst)
                return 0;
#endif

        return -ENETUNREACH;
}
static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
                                           int ulp_type)
{
        struct cnic_dev *dev = NULL;
        struct dst_entry *dst;
        struct net_device *netdev = NULL;
        int err = -ENETUNREACH;

        if (dst_addr->sin_family == AF_INET)
                err = cnic_get_v4_route(dst_addr, &dst);
        else if (dst_addr->sin_family == AF_INET6) {
                struct sockaddr_in6 *dst_addr6 =
                        (struct sockaddr_in6 *) dst_addr;

                err = cnic_get_v6_route(dst_addr6, &dst);
        } else
                return NULL;

        if (err)
                return NULL;

        if (!dst->dev)
                goto done;

        cnic_get_vlan(dst->dev, &netdev);

        dev = cnic_from_netdev(netdev);

done:
        dst_release(dst);
        if (dev)
                cnic_put(dev);
        return dev;
}

static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
        struct cnic_dev *dev = csk->dev;
        struct cnic_local *cp = dev->cnic_priv;

        return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}
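/* Resolve the route for the destination address and reserve a local
 * port, recording destination ip/port, vlan id and mtu in the socket.
 */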
static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
        struct cnic_dev *dev = csk->dev;
        struct cnic_local *cp = dev->cnic_priv;
        int is_v6, rc = 0;
        struct dst_entry *dst = NULL;
        struct net_device *realdev;
        u32 local_port;

        if (saddr->local.v6.sin6_family == AF_INET6 &&
            saddr->remote.v6.sin6_family == AF_INET6)
                is_v6 = 1;
        else if (saddr->local.v4.sin_family == AF_INET &&
                 saddr->remote.v4.sin_family == AF_INET)
                is_v6 = 0;
        else
                return -EINVAL;

        clear_bit(SK_F_IPV6, &csk->flags);

        if (is_v6) {
                set_bit(SK_F_IPV6, &csk->flags);
                cnic_get_v6_route(&saddr->remote.v6, &dst);

                memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
                       sizeof(struct in6_addr));
                csk->dst_port = saddr->remote.v6.sin6_port;
                local_port = saddr->local.v6.sin6_port;

        } else {
                cnic_get_v4_route(&saddr->remote.v4, &dst);

                csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
                csk->dst_port = saddr->remote.v4.sin_port;
                local_port = saddr->local.v4.sin_port;
        }

        csk->vlan_id = 0;
        csk->mtu = dev->netdev->mtu;
        if (dst && dst->dev) {
                u16 vlan = cnic_get_vlan(dst->dev, &realdev);
                if (realdev == dev->netdev) {
                        csk->vlan_id = vlan;
                        csk->mtu = dst_mtu(dst);
                }
        }

        if (local_port >= CNIC_LOCAL_PORT_MIN &&
            local_port < CNIC_LOCAL_PORT_MAX) {
                if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
                        local_port = 0;
        } else
                local_port = 0;

        if (!local_port) {
                local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
                if (local_port == -1) {
                        rc = -ENOMEM;
                        goto err_out;
                }
        }
        csk->src_port = local_port;

err_out:
        dst_release(dst);
        return rc;
}

static void cnic_init_csk_state(struct cnic_sock *csk)
{
        csk->state = 0;
        clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
        clear_bit(SK_F_CLOSING, &csk->flags);
}
static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
        int err = 0;

        if (!cnic_in_use(csk))
                return -EINVAL;

        if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
                return -EINVAL;

        cnic_init_csk_state(csk);

        err = cnic_get_route(csk, saddr);
        if (err)
                goto err_out;

        err = cnic_resolve_addr(csk, saddr);
        if (!err)
                return 0;

err_out:
        clear_bit(SK_F_CONNECT_START, &csk->flags);
        return err;
}

static int cnic_cm_abort(struct cnic_sock *csk)
{
        struct cnic_local *cp = csk->dev->cnic_priv;
        u32 opcode;

        if (!cnic_in_use(csk))
                return -EINVAL;

        if (cnic_abort_prep(csk))
                return cnic_cm_abort_req(csk);

        /* Getting here means that we haven't started connect, or
         * connect was not successful.
         */

        csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
        if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
                opcode = csk->state;
        else
                opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
        cp->close_conn(csk, opcode);

        return 0;
}

static int cnic_cm_close(struct cnic_sock *csk)
{
        if (!cnic_in_use(csk))
                return -EINVAL;

        if (cnic_close_prep(csk)) {
                csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
                return cnic_cm_close_req(csk);
        }
        return -EALREADY;
}
static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
                           u8 opcode)
{
        struct cnic_ulp_ops *ulp_ops;
        int ulp_type = csk->ulp_type;

        rcu_read_lock();
        ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
        if (ulp_ops) {
                if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
                        ulp_ops->cm_connect_complete(csk);
                else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
                        ulp_ops->cm_close_complete(csk);
                else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
                        ulp_ops->cm_remote_abort(csk);
                else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
                        ulp_ops->cm_abort_complete(csk);
                else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
                        ulp_ops->cm_remote_close(csk);
        }
        rcu_read_unlock();
}

static int cnic_cm_set_pg(struct cnic_sock *csk)
{
        if (cnic_offld_prep(csk)) {
                if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
                        cnic_cm_update_pg(csk);
                else
                        cnic_cm_offload_pg(csk);
        }

        return 0;
}
static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
{
        struct cnic_local *cp = dev->cnic_priv;
        u32 l5_cid = kcqe->pg_host_opaque;
        u8 opcode = kcqe->op_code;
        struct cnic_sock *csk = &cp->csk_tbl[l5_cid];

        csk_hold(csk);
        if (!cnic_in_use(csk))
                goto done;

        if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
                clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
                goto done;
        }
        /* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
        if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
                clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
                cnic_cm_upcall(cp, csk,
                               L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
                goto done;
        }

        csk->pg_cid = kcqe->pg_cid;
        set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
        cnic_cm_conn_req(csk);

done:
        csk_put(csk);
}

static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
        u8 opcode = l4kcqe->op_code;
        u32 l5_cid;
        struct cnic_sock *csk;

        if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
            opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
                cnic_cm_process_offld_pg(dev, l4kcqe);
                return;
        }

        l5_cid = l4kcqe->conn_id;
        if (opcode & 0x80)
                l5_cid = l4kcqe->cid;
        if (l5_cid >= MAX_CM_SK_TBL_SZ)
                return;

        csk = &cp->csk_tbl[l5_cid];
        csk_hold(csk);

        if (!cnic_in_use(csk)) {
                csk_put(csk);
                return;
        }

        switch (opcode) {
        case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
                if (l4kcqe->status != 0) {
                        clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
                        cnic_cm_upcall(cp, csk,
                                       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
                }
                break;
        case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
                if (l4kcqe->status == 0)
                        set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);

                smp_mb__before_clear_bit();
                clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
                cnic_cm_upcall(cp, csk, opcode);
                break;

        case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
                if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
                        cnic_cm_upcall(cp, csk, opcode);
                        break;
                } else if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
                        csk->state = opcode;
                /* fall through */
        case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
        case L4_KCQE_OPCODE_VALUE_RESET_COMP:
        case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
        case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
                cp->close_conn(csk, opcode);
                break;

        case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
                cnic_cm_upcall(cp, csk, opcode);
                break;
        }
        csk_put(csk);
}
static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
        struct cnic_dev *dev = data;
        int i;

        for (i = 0; i < num; i++)
                cnic_cm_process_kcqe(dev, kcqe[i]);
}

static struct cnic_ulp_ops cm_ulp_ops = {
        .indicate_kcqes         = cnic_cm_indicate_kcqe,
};

static void cnic_cm_free_mem(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;

        kfree(cp->csk_tbl);
        cp->csk_tbl = NULL;
        cnic_free_id_tbl(&cp->csk_port_tbl);
}

static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;

        cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
                              GFP_KERNEL);
        if (!cp->csk_tbl)
                return -ENOMEM;

        if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
                             CNIC_LOCAL_PORT_MIN)) {
                cnic_cm_free_mem(dev);
                return -ENOMEM;
        }
        return 0;
}
*csk
, u32 opcode
)
3166 if ((opcode
== csk
->state
) ||
3167 (opcode
== L4_KCQE_OPCODE_VALUE_RESET_RECEIVED
&&
3168 csk
->state
== L4_KCQE_OPCODE_VALUE_CLOSE_COMP
)) {
3169 if (!test_and_set_bit(SK_F_CLOSING
, &csk
->flags
))
3172 /* 57710+ only workaround to handle unsolicited RESET_COMP
3173 * which will be treated like a RESET RCVD notification
3174 * which triggers the clean up procedure
3176 else if (opcode
== L4_KCQE_OPCODE_VALUE_RESET_COMP
) {
3177 if (!test_and_set_bit(SK_F_CLOSING
, &csk
->flags
)) {
3178 csk
->state
= L4_KCQE_OPCODE_VALUE_RESET_RECEIVED
;
3185 static void cnic_close_bnx2_conn(struct cnic_sock
*csk
, u32 opcode
)
3187 struct cnic_dev
*dev
= csk
->dev
;
3188 struct cnic_local
*cp
= dev
->cnic_priv
;
3190 clear_bit(SK_F_CONNECT_START
, &csk
->flags
);
3191 cnic_close_conn(csk
);
3192 cnic_cm_upcall(cp
, csk
, opcode
);
3195 static void cnic_cm_stop_bnx2_hw(struct cnic_dev
*dev
)
3199 static int cnic_cm_init_bnx2_hw(struct cnic_dev
*dev
)
3203 get_random_bytes(&seed
, 4);
3204 cnic_ctx_wr(dev
, 45, 0, seed
);
static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
        struct cnic_dev *dev = csk->dev;
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
        union l5cm_specific_data l5_data;
        u32 cmd = 0;
        int close_complete = 0;

        switch (opcode) {
        case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
        case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
        case L4_KCQE_OPCODE_VALUE_RESET_COMP:
                if (cnic_ready_to_close(csk, opcode))
                        cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
                break;
        case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
                cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
                break;
        case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
                close_complete = 1;
                break;
        }
        if (cmd) {
                memset(&l5_data, 0, sizeof(l5_data));

                cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
                                    &l5_data);
        } else if (close_complete) {
                ctx->timestamp = jiffies;
                cnic_close_conn(csk);
                cnic_cm_upcall(cp, csk, csk->state);
        }
}

static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        int func = CNIC_FUNC(cp);

        cnic_init_bnx2x_mac(dev);
        cnic_bnx2x_set_tcp_timestamp(dev, 1);

        CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
                  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(func), 0);

        CNIC_WR(dev, BAR_XSTRORM_INTMEM +
                XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1);
        CNIC_WR(dev, BAR_XSTRORM_INTMEM +
                XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func),
                DEF_MAX_DA_COUNT);

        CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
                 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(func), DEF_TTL);
        CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
                 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(func), DEF_TOS);
        CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
                 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(func), 2);
        CNIC_WR(dev, BAR_XSTRORM_INTMEM +
                XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), DEF_SWS_TIMER);

        CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(func),
                DEF_MAX_CWND);
        return 0;
}
static int cnic_cm_open(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        int err;

        err = cnic_cm_alloc_mem(dev);
        if (err)
                return err;

        err = cp->start_cm(dev);

        if (err)
                goto err_out;

        dev->cm_create = cnic_cm_create;
        dev->cm_destroy = cnic_cm_destroy;
        dev->cm_connect = cnic_cm_connect;
        dev->cm_abort = cnic_cm_abort;
        dev->cm_close = cnic_cm_close;
        dev->cm_select_dev = cnic_cm_select_dev;

        cp->ulp_handle[CNIC_ULP_L4] = dev;
        rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
        return 0;

err_out:
        cnic_cm_free_mem(dev);
        return err;
}

static int cnic_cm_shutdown(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        int i;

        cp->stop_cm(dev);

        if (!cp->csk_tbl)
                return 0;

        for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
                struct cnic_sock *csk = &cp->csk_tbl[i];

                clear_bit(SK_F_INUSE, &csk->flags);
                cnic_cm_cleanup(csk);
        }
        cnic_cm_free_mem(dev);

        return 0;
}
static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
        struct cnic_local *cp = dev->cnic_priv;
        u32 cid_addr;
        int i;

        if (CHIP_NUM(cp) == CHIP_NUM_5709)
                return;

        cid_addr = GET_CID_ADDR(cid);

        for (i = 0; i < CTX_SIZE; i += 4)
                cnic_ctx_wr(dev, cid_addr, i, 0);
}

static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
        struct cnic_local *cp = dev->cnic_priv;
        int ret = 0, i;
        u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;

        if (CHIP_NUM(cp) != CHIP_NUM_5709)
                return 0;

        for (i = 0; i < cp->ctx_blks; i++) {
                int j;
                u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
                u32 val;

                memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);

                CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                        (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
                CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                        (u64) cp->ctx_arr[i].mapping >> 32);
                CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
                        BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                for (j = 0; j < 10; j++) {

                        val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
static void cnic_free_irq(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;

        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
                cp->disable_int_sync(dev);
                tasklet_disable(&cp->cnic_irq_task);
                free_irq(ethdev->irq_arr[0].vector, dev);
        }
}

static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;
        int err, i = 0;

        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
                int sblk_num = cp->status_blk_num;
                u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
                           BNX2_HC_SB_CONFIG_1;

                CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);

                CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
                CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
                CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);

                cp->last_status_idx = cp->status_blk.bnx2->status_idx;
                tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
                             (unsigned long) dev);
                err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
                                  "cnic", dev);
                if (err) {
                        tasklet_disable(&cp->cnic_irq_task);
                        return err;
                }
                while (cp->status_blk.bnx2->status_completion_producer_index &&
                       i < 10) {
                        CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
                                1 << (11 + sblk_num));
                        udelay(10);
                        i++;
                        barrier();
                }
                if (cp->status_blk.bnx2->status_completion_producer_index) {
                        cnic_free_irq(dev);
                        goto failed;
                }

        } else {
                struct status_block *sblk = cp->status_blk.gen;
                u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);

                while (sblk->status_completion_producer_index && i < 10) {
                        CNIC_WR(dev, BNX2_HC_COMMAND,
                                hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                        udelay(10);
                        i++;
                        barrier();
                }
                if (sblk->status_completion_producer_index)
                        goto failed;

        }
        return 0;

failed:
        netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
        return -EBUSY;
}
static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;

        if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
                return;

        CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;

        if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
                return;

        CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
        synchronize_irq(ethdev->irq_arr[0].vector);
}
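/* Set up the L2 tx ring and its chip context.  These L2 rings carry
 * the iSCSI pass-through traffic of the userspace uio client.
 */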
static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;
        u32 cid_addr, tx_cid, sb_id;
        u32 val, offset0, offset1, offset2, offset3;
        int i;
        struct tx_bd *txbd;
        dma_addr_t buf_map;
        struct status_block *s_blk = cp->status_blk.gen;

        sb_id = cp->status_blk_num;
        tx_cid = 20;
        cnic_init_context(dev, tx_cid);
        cnic_init_context(dev, tx_cid + 1);
        cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
                struct status_block_msix *sblk = cp->status_blk.bnx2;

                tx_cid = TX_TSS_CID + sb_id - 1;
                cnic_init_context(dev, tx_cid);
                CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
                        (TX_TSS_CID << 7));
                cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
        }
        cp->tx_cons = *cp->tx_cons_ptr;

        cid_addr = GET_CID_ADDR(tx_cid);
        if (CHIP_NUM(cp) == CHIP_NUM_5709) {
                u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;

                for (i = 0; i < PHY_CTX_SIZE; i += 4)
                        cnic_ctx_wr(dev, cid_addr2, i, 0);

                offset0 = BNX2_L2CTX_TYPE_XI;
                offset1 = BNX2_L2CTX_CMD_TYPE_XI;
                offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
                offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
        } else {
                offset0 = BNX2_L2CTX_TYPE;
                offset1 = BNX2_L2CTX_CMD_TYPE;
                offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
                offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
        }
        val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
        cnic_ctx_wr(dev, cid_addr, offset0, val);

        val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
        cnic_ctx_wr(dev, cid_addr, offset1, val);

        txbd = (struct tx_bd *) cp->l2_ring;

        buf_map = cp->l2_buf_map;
        for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
                txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
                txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
        }
        val = (u64) cp->l2_ring_map >> 32;
        cnic_ctx_wr(dev, cid_addr, offset2, val);
        txbd->tx_bd_haddr_hi = val;

        val = (u64) cp->l2_ring_map & 0xffffffff;
        cnic_ctx_wr(dev, cid_addr, offset3, val);
        txbd->tx_bd_haddr_lo = val;
}
static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;
        u32 cid_addr, sb_id, val, coal_reg, coal_val;
        int i;
        struct rx_bd *rxbd;
        struct status_block *s_blk = cp->status_blk.gen;

        sb_id = cp->status_blk_num;
        cnic_init_context(dev, 2);
        cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
        coal_reg = BNX2_HC_COMMAND;
        coal_val = CNIC_RD(dev, coal_reg);
        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
                struct status_block_msix *sblk = cp->status_blk.bnx2;

                cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
                coal_reg = BNX2_HC_COALESCE_NOW;
                coal_val = 1 << (11 + sb_id);
        }
        i = 0;
        while (!(*cp->rx_cons_ptr != 0) && i < 10) {
                CNIC_WR(dev, coal_reg, coal_val);
                udelay(10);
                i++;
                barrier();
        }
        cp->rx_cons = *cp->rx_cons_ptr;

        cid_addr = GET_CID_ADDR(2);
        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
              BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
        cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);

        if (sb_id == 0)
                val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
        else
                val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
        cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

        rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
        for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
                dma_addr_t buf_map;
                int n = (i % cp->l2_rx_ring_size) + 1;

                buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
                rxbd->rx_bd_len = cp->l2_single_buf_size;
                rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
                rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
                rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
        }
        val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
        cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
        rxbd->rx_bd_haddr_hi = val;

        val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
        cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
        rxbd->rx_bd_haddr_lo = val;

        val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
        cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}
static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
        struct kwqe *wqes[1], l2kwqe;

        memset(&l2kwqe, 0, sizeof(l2kwqe));
        wqes[0] = &l2kwqe;
        l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
                              (L2_KWQE_OPCODE_VALUE_FLUSH <<
                               KWQE_OPCODE_SHIFT) | 2;
        dev->submit_kwqes(dev, wqes, 1);
}
static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        u32 val;

        val = cp->func << 2;

        cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

        val = cnic_reg_rd_ind(dev, cp->shmem_base +
                              BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
        dev->mac_addr[0] = (u8) (val >> 8);
        dev->mac_addr[1] = (u8) val;

        CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

        val = cnic_reg_rd_ind(dev, cp->shmem_base +
                              BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
        dev->mac_addr[2] = (u8) (val >> 24);
        dev->mac_addr[3] = (u8) (val >> 16);
        dev->mac_addr[4] = (u8) (val >> 8);
        dev->mac_addr[5] = (u8) val;

        CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

        val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
        if (CHIP_NUM(cp) != CHIP_NUM_5709)
                val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

        CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
        CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
        CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;
        struct status_block *sblk = cp->status_blk.gen;
        u32 val;
        int err;

        cnic_set_bnx2_mac(dev);

        val = CNIC_RD(dev, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        if (BCM_PAGE_BITS > 12)
                val |= (12 - 8)  << 4;
        else
                val |= (BCM_PAGE_BITS - 8)  << 4;

        CNIC_WR(dev, BNX2_MQ_CONFIG, val);

        CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
        CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
        CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

        err = cnic_setup_5709_context(dev, 1);
        if (err)
                return err;

        cnic_init_context(dev, KWQ_CID);
        cnic_init_context(dev, KCQ_CID);

        cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
        cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

        cp->max_kwq_idx = MAX_KWQ_IDX;
        cp->kwq_prod_idx = 0;
        cp->kwq_con_idx = 0;
        set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

        if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
                cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
        else
                cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

        /* Initialize the kernel work queue context. */
        val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
              (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
        cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);

        val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
        cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

        val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
        cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

        val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
        cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

        val = (u32) cp->kwq_info.pgtbl_map;
        cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

        cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
        cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

        cp->kcq_prod_idx = 0;

        /* Initialize the kernel complete queue context. */
        val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
              (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
        cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);

        val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
        cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

        val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
        cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

        val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
        cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

        val = (u32) cp->kcq_info.pgtbl_map;
        cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

        cp->int_num = 0;
        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
                u32 sb_id = cp->status_blk_num;
                u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

                cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
                cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
                cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
        }

        /* Enable Command Scheduler notification when we write to the
         * host producer index of the kernel contexts. */
        CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

        /* Enable Command Scheduler notification when we write to either
         * the Send Queue or Receive Queue producer indexes of the kernel
         * bypass contexts. */
        CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
        CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

        /* Notify COM when the driver posts an application buffer. */
        CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

        /* Set the CP and COM doorbells.  These two processors poll the
         * doorbell for a non zero value before running.  This must be done
         * after setting up the kernel queue contexts. */
        cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
        cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

        cnic_init_bnx2_tx_ring(dev);
        cnic_init_bnx2_rx_ring(dev);

        err = cnic_init_bnx2_irq(dev);
        if (err) {
                netdev_err(dev->netdev, "cnic_init_irq failed\n");
                cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
                cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
                return err;
        }

        return 0;
}
static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;
        u32 start_offset = ethdev->ctx_tbl_offset;
        int i;

        for (i = 0; i < cp->ctx_blks; i++) {
                struct cnic_ctx *ctx = &cp->ctx_arr[i];
                dma_addr_t map = ctx->mapping;

                if (cp->ctx_align) {
                        unsigned long mask = cp->ctx_align - 1;

                        map = (map + mask) & ~mask;
                }

                cnic_ctx_tbl_wr(dev, start_offset + i, map);
        }
}

static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;
        int err = 0;

        tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
                     (unsigned long) dev);
        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
                err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
                                  "cnic", dev);
                if (err)
                        tasklet_disable(&cp->cnic_irq_task);
        }
        return err;
}
static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        u8 sb_id = cp->status_blk_num;
        int port = CNIC_PORT(cp);

        CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
                 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
                                               HC_INDEX_C_ISCSI_EQ_CONS),
                 64 / 12);
        CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
                  CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
                                                HC_INDEX_C_ISCSI_EQ_CONS), 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring;
        struct eth_context *context;
        struct regpair context_addr;
        dma_addr_t buf_map;
        int func = CNIC_FUNC(cp);
        int port = CNIC_PORT(cp);
        int i;
        int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
        u32 val;

        memset(txbd, 0, BCM_PAGE_SIZE);

        buf_map = cp->l2_buf_map;
        for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
                struct eth_tx_start_bd *start_bd = &txbd->start_bd;
                struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

                start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
                start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
                reg_bd->addr_hi = start_bd->addr_hi;
                reg_bd->addr_lo = start_bd->addr_lo + 0x10;
                start_bd->nbytes = cpu_to_le16(0x10);
                start_bd->nbd = cpu_to_le16(3);
                start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
                start_bd->general_data = (UNICAST_ADDRESS <<
                        ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
                start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

        }
        context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 1, &context_addr);

        val = (u64) cp->l2_ring_map >> 32;
        txbd->next_bd.addr_hi = cpu_to_le32(val);

        context->xstorm_st_context.tx_bd_page_base_hi = val;

        val = (u64) cp->l2_ring_map & 0xffffffff;
        txbd->next_bd.addr_lo = cpu_to_le32(val);

        context->xstorm_st_context.tx_bd_page_base_lo = val;

        context->cstorm_st_context.sb_index_number =
                HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS;
        context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID;

        context->xstorm_st_context.statistics_data = (cli |
                                XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

        context->xstorm_ag_context.cdu_reserved =
                CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
                                       CDU_REGION_NUMBER_XCM_AG,
                                       ETH_CONNECTION_TYPE);

        /* reset xstorm per client statistics */
        val = BAR_XSTRORM_INTMEM +
              XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
        for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
                CNIC_WR(dev, val + i * 4, 0);

        cp->tx_cons_ptr =
                &cp->bnx2x_def_status_blk->c_def_status_block.index_values[
                        HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS];
}
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring +
                                BCM_PAGE_SIZE);
        struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
                                (cp->l2_ring + (2 * BCM_PAGE_SIZE));
        struct eth_context *context;
        struct regpair context_addr;
        int i;
        int port = CNIC_PORT(cp);
        int func = CNIC_FUNC(cp);
        int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
        u32 val;
        struct tstorm_eth_client_config tstorm_client = {0};

        for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
                dma_addr_t buf_map;
                int n = (i % cp->l2_rx_ring_size) + 1;

                buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
                rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
                rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
        }
        context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 0, &context_addr);

        val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
        rxbd->addr_hi = cpu_to_le32(val);

        context->ustorm_st_context.common.bd_page_base_hi = val;

        val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
        rxbd->addr_lo = cpu_to_le32(val);

        context->ustorm_st_context.common.bd_page_base_lo = val;

        context->ustorm_st_context.common.sb_index_numbers =
                                                BNX2X_ISCSI_RX_SB_INDEX_NUM;
        context->ustorm_st_context.common.clientId = cli;
        context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID;
        context->ustorm_st_context.common.flags =
                USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS;
        context->ustorm_st_context.common.statistics_counter_id = cli;
        context->ustorm_st_context.common.mc_alignment_log_size = 0;
        context->ustorm_st_context.common.bd_buff_size =
                                                cp->l2_single_buf_size;

        context->ustorm_ag_context.cdu_usage =
                CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
                                       CDU_REGION_NUMBER_UCM_AG,
                                       ETH_CONNECTION_TYPE);

        rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
        val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
        rxcqe->addr_hi = cpu_to_le32(val);

        CNIC_WR(dev, BAR_USTRORM_INTMEM +
                USTORM_CQE_PAGE_BASE_OFFSET(port, cli) + 4, val);

        CNIC_WR(dev, BAR_USTRORM_INTMEM +
                USTORM_CQE_PAGE_NEXT_OFFSET(port, cli) + 4, val);

        val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
        rxcqe->addr_lo = cpu_to_le32(val);

        CNIC_WR(dev, BAR_USTRORM_INTMEM +
                USTORM_CQE_PAGE_BASE_OFFSET(port, cli), val);

        CNIC_WR(dev, BAR_USTRORM_INTMEM +
                USTORM_CQE_PAGE_NEXT_OFFSET(port, cli), val);

        /* client tstorm info */
        tstorm_client.mtu = cp->l2_single_buf_size - 14;
        tstorm_client.config_flags =
                        (TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE |
                         TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE);
        tstorm_client.statistics_counter_id = cli;

        CNIC_WR(dev, BAR_TSTRORM_INTMEM +
                TSTORM_CLIENT_CONFIG_OFFSET(port, cli),
                ((u32 *)&tstorm_client)[0]);
        CNIC_WR(dev, BAR_TSTRORM_INTMEM +
                TSTORM_CLIENT_CONFIG_OFFSET(port, cli) + 4,
                ((u32 *)&tstorm_client)[1]);

        /* reset tstorm per client statistics */
        val = BAR_TSTRORM_INTMEM +
              TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
        for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
                CNIC_WR(dev, val + i * 4, 0);

        /* reset ustorm per client statistics */
        val = BAR_USTRORM_INTMEM +
              USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
        for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
                CNIC_WR(dev, val + i * 4, 0);

        cp->rx_cons_ptr =
                &cp->bnx2x_def_status_blk->u_def_status_block.index_values[
                        HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS];
}
static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        u32 base, addr, val;
        int port = CNIC_PORT(cp);

        dev->max_iscsi_conn = 0;
        base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
        if (base < 0xa0000 || base >= 0xc0000)
                return;

        addr = BNX2X_SHMEM_ADDR(base,
                dev_info.port_hw_config[port].iscsi_mac_upper);

        val = CNIC_RD(dev, addr);

        dev->mac_addr[0] = (u8) (val >> 8);
        dev->mac_addr[1] = (u8) val;

        addr = BNX2X_SHMEM_ADDR(base,
                dev_info.port_hw_config[port].iscsi_mac_lower);

        val = CNIC_RD(dev, addr);

        dev->mac_addr[2] = (u8) (val >> 24);
        dev->mac_addr[3] = (u8) (val >> 16);
        dev->mac_addr[4] = (u8) (val >> 8);
        dev->mac_addr[5] = (u8) val;

        addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
        val = CNIC_RD(dev, addr);

        if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
                u16 val16;

                addr = BNX2X_SHMEM_ADDR(base,
                                drv_lic_key[port].max_iscsi_init_conn);
                val16 = CNIC_RD16(dev, addr);

                if (val16)
                        val16 ^= 0x1e1e;
                dev->max_iscsi_conn = val16;
        }
        if (BNX2X_CHIP_IS_E1H(cp->chip_id)) {
                int func = CNIC_FUNC(cp);

                addr = BNX2X_SHMEM_ADDR(base,
                                mf_cfg.func_mf_config[func].e1hov_tag);
                val = CNIC_RD(dev, addr);
                val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
                if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
                        addr = BNX2X_SHMEM_ADDR(base,
                                mf_cfg.func_mf_config[func].config);
                        val = CNIC_RD(dev, addr);
                        val &= FUNC_MF_CFG_PROTOCOL_MASK;
                        if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
                                dev->max_iscsi_conn = 0;
                }
        }
}
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        int func = CNIC_FUNC(cp), ret, i;
        int port = CNIC_PORT(cp);
        u16 eq_idx;
        u8 sb_id = cp->status_blk_num;

        ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
                               BNX2X_ISCSI_START_CID);

        if (ret)
                return -ENOMEM;

        cp->kcq_io_addr = BAR_CSTRORM_INTMEM +
                          CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0);
        cp->kcq_prod_idx = 0;

        cnic_get_bnx2x_iscsi_info(dev);

        /* Only 1 EQ */
        CNIC_WR16(dev, cp->kcq_io_addr, MAX_KCQ_IDX);
        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
                CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0);
        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
                CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0),
                cp->kcq_info.pg_map_arr[1] & 0xffffffff);
        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
                CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4,
                (u64) cp->kcq_info.pg_map_arr[1] >> 32);
        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
                CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0),
                cp->kcq_info.pg_map_arr[0] & 0xffffffff);
        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
                CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4,
                (u64) cp->kcq_info.pg_map_arr[0] >> 32);
        CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
                 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1);
        CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
                  CSTORM_ISCSI_EQ_SB_NUM_OFFSET(func, 0), cp->status_blk_num);
        CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
                 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(func, 0),
                 HC_INDEX_C_ISCSI_EQ_CONS);

        for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
                CNIC_WR(dev, BAR_TSTRORM_INTMEM +
                        TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i),
                        cp->conn_buf_info.pgtbl[2 * i]);
                CNIC_WR(dev, BAR_TSTRORM_INTMEM +
                        TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i) + 4,
                        cp->conn_buf_info.pgtbl[(2 * i) + 1]);
        }

        CNIC_WR(dev, BAR_USTRORM_INTMEM +
                USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func),
                cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
        CNIC_WR(dev, BAR_USTRORM_INTMEM +
                USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func) + 4,
                (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

        cnic_setup_bnx2x_context(dev);

        eq_idx = CNIC_RD16(dev, BAR_CSTRORM_INTMEM +
                           CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
                           offsetof(struct cstorm_status_block_c,
                                    index_values[HC_INDEX_C_ISCSI_EQ_CONS]));
        if (eq_idx != 0) {
                netdev_err(dev->netdev, "EQ cons index %x != 0\n", eq_idx);
                return -EBUSY;
        }
        ret = cnic_init_bnx2x_irq(dev);
        if (ret)
                return ret;

        cnic_init_bnx2x_tx_ring(dev);
        cnic_init_bnx2x_rx_ring(dev);

        return 0;
}
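/* Initialize the L2 rings for the device class; for bnx2x this also
 * posts the client-setup ramrod and enables the ring.
 */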
static void cnic_init_rings(struct cnic_dev *dev)
{
        if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
                cnic_init_bnx2_tx_ring(dev);
                cnic_init_bnx2_rx_ring(dev);
        } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
                struct cnic_local *cp = dev->cnic_priv;
                u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
                union l5cm_specific_data l5_data;
                struct ustorm_eth_rx_producers rx_prods = {0};
                u32 off, i;

                rx_prods.bd_prod = 0;
                rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
                barrier();

                off = BAR_USTRORM_INTMEM +
                        USTORM_RX_PRODS_OFFSET(CNIC_PORT(cp), cli);

                for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
                        CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

                cnic_init_bnx2x_tx_ring(dev);
                cnic_init_bnx2x_rx_ring(dev);

                l5_data.phy_address.lo = cli;
                l5_data.phy_address.hi = 0;
                cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
                        BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
                cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
        }
}

static void cnic_shutdown_rings(struct cnic_dev *dev)
{
        if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
                cnic_shutdown_bnx2_rx_ring(dev);
        } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
                struct cnic_local *cp = dev->cnic_priv;
                u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
                union l5cm_specific_data l5_data;

                cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);

                l5_data.phy_address.lo = cli;
                l5_data.phy_address.hi = 0;
                cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
                        BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
                msleep(10);

                memset(&l5_data, 0, sizeof(l5_data));
                cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
                        BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE |
                        (1 << SPE_HDR_COMMON_RAMROD_SHIFT), &l5_data);
                msleep(10);
        }
}
static int cnic_register_netdev(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;
        int err;

        if (!ethdev)
                return -ENODEV;

        if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
                return 0;

        err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
        if (err)
                netdev_err(dev->netdev, "register_cnic failed\n");

        return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;

        if (!ethdev)
                return;

        ethdev->drv_unregister_cnic(dev->netdev);
}
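/* Bring the hardware side up: latch ethdev resources, allocate and
 * start the chip-specific layer, open the connection manager, then
 * enable interrupts.
 */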
static int cnic_start_hw(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;
        int err;

        if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
                return -EALREADY;

        dev->regview = ethdev->io_base;
        cp->chip_id = ethdev->chip_id;
        pci_dev_get(dev->pcidev);
        cp->func = PCI_FUNC(dev->pcidev->devfn);
        cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
        cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

        err = cp->alloc_resc(dev);
        if (err) {
                netdev_err(dev->netdev, "allocate resource failure\n");
                goto err1;
        }

        err = cp->start_hw(dev);
        if (err)
                goto err1;

        err = cnic_cm_open(dev);
        if (err)
                goto err1;

        set_bit(CNIC_F_CNIC_UP, &dev->flags);

        cp->enable_int(dev);

        return 0;

err1:
        cp->free_resc(dev);
        pci_dev_put(dev->pcidev);
        return err;
}
static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
        cnic_disable_bnx2_int_sync(dev);

        cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
        cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

        cnic_init_context(dev, KWQ_CID);
        cnic_init_context(dev, KCQ_CID);

        cnic_setup_5709_context(dev, 0);
        cnic_free_irq(dev);

        cnic_free_resc(dev);
}

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        u8 sb_id = cp->status_blk_num;
        int port = CNIC_PORT(cp);

        cnic_free_irq(dev);
        CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
                  CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
                  offsetof(struct cstorm_status_block_c,
                           index_values[HC_INDEX_C_ISCSI_EQ_CONS]),
                  0);
        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
                CSTORM_ISCSI_EQ_CONS_OFFSET(cp->func, 0), 0);
        CNIC_WR16(dev, cp->kcq_io_addr, 0);
        cnic_free_resc(dev);
}

static void cnic_stop_hw(struct cnic_dev *dev)
{
        if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
                struct cnic_local *cp = dev->cnic_priv;

                clear_bit(CNIC_F_CNIC_UP, &dev->flags);
                rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
                synchronize_rcu();
                cnic_cm_shutdown(dev);
                cp->stop_hw(dev);
                pci_dev_put(dev->pcidev);
        }
}
static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}
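/*
 * Allocate a cnic_dev with its cnic_local private area in one block and
 * fill in the fields common to all supported chips.
 */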
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		netdev_err(dev, "allocate dev struct failure\n");
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->uio_dev = -1;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
		u8 rev;

		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
		if (rev < 0x10) {
			pci_dev_put(pdev);
			goto cnic_err;
		}
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	cp->next_idx = cnic_bnx2_next_idx;
	cp->hw_idx = cnic_bnx2_hw_idx;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}
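/*
 * bnx2x counterpart of init_bnx2_cnic(); it differs mainly in the
 * chip-specific handler table and the extra MSI-X ack callback.
 */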
static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2x_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2x_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;
	cp->next_idx = cnic_bnx2x_next_idx;
	cp->hw_idx = cnic_bnx2x_hw_idx;

	return cdev;
}
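/*
 * Identify a CNIC-capable netdev by its ethtool driver name and, on a
 * match, create the CNIC device and add it to the global device list.
 */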
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}
/**
 * netdev event handler
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
							 void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	}
done:
	return NOTIFY_DONE;
}
static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};
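/*
 * Stop and free every remaining CNIC device; called on module unload
 * and on a failed module init.
 */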
static void cnic_release(void)
{
	struct cnic_dev *dev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
}
static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	return 0;
}
static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	return;
}
module_init(cnic_init);
module_exit(cnic_exit);