/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"
#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);
/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);
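
/* One entry per upper-layer protocol (iSCSI, RDMA, L4).  Readers walk this
 * table under rcu_read_lock() via rcu_dereference(); updates go through
 * rcu_assign_pointer() while holding cnic_lock.
 */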
static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;
static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}
static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}
static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}
static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}
static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}
static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}
static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}
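
/* Map a hardware context ID back to its index in cp->ctx_tbl.  The table
 * is small (max_cid_space entries), so a linear scan is used instead of a
 * reverse lookup table.
 */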
static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}
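
/* Send an iSCSI netlink event to userspace through the registered iSCSI
 * ULP.  With a csk this is a PATH_REQ carrying the destination IP, VLAN
 * and MTU so userspace can resolve the next hop; with csk == NULL it is
 * an IF_DOWN notification.
 */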
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return 0;
}
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk)) {
			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));
			if (is_valid_ether_addr(csk->ha))
				cnic_cm_set_pg(csk);
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}
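
/* SK_F_OFFLD_SCHED acts as a per-socket busy bit: whoever sets it owns the
 * offload state machine until it is cleared.  The prep helpers below spin
 * on test_and_set_bit() to serialize offload, close and abort requests.
 */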
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}
static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}
static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}
static void cnic_uio_stop(void)
{
	struct cnic_dev *dev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	}
	read_unlock(&cnic_dev_lock);
}
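
/* ULP registration: publish the ops in cnic_ulp_tbl first, then call
 * cnic_init() once per existing device.  ULP_F_INIT guards against a
 * concurrent netdev_event initializing the same device twice.
 */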
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl[ulp_type];
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_uio_stop();

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}
static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
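
/* Simple bitmap-based ID allocator: one bit per ID, with start_id added to
 * the bit index to form the caller-visible ID.
 */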
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}
static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return -1;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}
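
/* Next-fit search: scan from id_tbl->next to the end, then wrap and scan
 * the head.  Note the wrap arithmetic (id + 1) & (max - 1) assumes
 * id_tbl->max is a power of two.
 */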
/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}
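
/* Two page-table setup variants exist because the bnx2 chips consume
 * big-endian {hi,lo} page entries while the bnx2x parts consume
 * little-endian {lo,hi} pairs; cp->setup_pgtbl is chosen per device class.
 */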
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}
static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = dma->pg_map_arr[i] & 0xffffffff;
		page_table++;
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}
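
/* One kzalloc() carries both arrays: pg_arr occupies the first `pages`
 * pointers and pg_map_arr aliases the tail of the same block.
 */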
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}
static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}
static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

	pci_dev_put(udev->pdev);
	kfree(udev);
}
static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}
static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->cid_tbl);
}
static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	int err, i, is_bnx2 = 0;
	struct kcqe **kcq;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
		is_bnx2 = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	if (is_bnx2)
		return 0;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}
static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf)
		return -ENOMEM;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;
}
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
				     PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
				     PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	if (start_cid < BNX2X_ISCSI_START_CID) {
		u32 delta = BNX2X_ISCSI_START_CID - start_cid;

		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
		cp->max_cid_space += delta;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}
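
/* Free slots in the kernel work queue ring.  With max_kwq_idx used as a
 * power-of-two mask, (prod - con) & mask is the number of in-flight
 * entries even after the 16-bit indices wrap; e.g. prod = 0x0002,
 * con = 0xfffe, mask = 0xffff gives 4 outstanding entries.
 */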
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}
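
/* bnx2x work requests are submitted as single 16-byte SPE entries through
 * the bnx2x driver rather than copied into a cnic-owned ring; larger
 * payloads travel out-of-band in the per-context kwqe_data buffer whose
 * DMA address is passed in l5_data.
 */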
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));
	kwqe.hdr.type = cpu_to_le16(type);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}
static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
	}
	cnic_free_id(&cp->cid_tbl, ctx->cid);
	ctx->cid = 0;
}
static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;
}
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return ret;
}
static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid, type;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
	type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
		& SPE_HDR_CONN_TYPE;
	type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, type, &l5_data);

	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

	return ret;
}
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);
	clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
		 mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
		 mac[0]);
}
static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}
static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}
*dev
, struct kwqe
*kwqe
)
2052 struct l4_kwq_reset_req
*req
= (struct l4_kwq_reset_req
*) kwqe
;
2053 union l5cm_specific_data l5_data
;
2056 memset(&l5_data
, 0, sizeof(l5_data
));
2057 ret
= cnic_submit_kwqe_16(dev
, L5CM_RAMROD_CMD_ID_ABORT
,
2058 req
->cid
, ISCSI_CONNECTION_TYPE
, &l5_data
);
static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}
static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}
static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			ret = 0;
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0)
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);
		i += work;
	}
	return 0;
}
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, comp = 0;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			comp++;

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				comp++;
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
				   kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
	if (unlikely(comp))
		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}
static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}
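
/* Harvest completed KCQEs between the software index and the hardware
 * producer into cp->completed_kcq[].  Entries flagged KCQE_FLAGS_NEXT
 * continue in the following KCQE, so only runs ending on an unflagged
 * entry are counted as complete (last_cnt/last).
 */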
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, hw_prod, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = info->sw_prod_idx;
	ri &= MAX_KCQ_IDX;
	hw_prod = *info->hw_prod_idx_ptr;
	hw_prod = cp->hw_idx(hw_prod);

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = cp->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	info->sw_prod_idx = last;
	return last_cnt;
}
static int cnic_l2_completion(struct cnic_local *cp)
{
	u16 hw_cons, sw_cons;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
					(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	u32 cmd;
	int comp = 0;

	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
		return 0;

	hw_cons = *cp->rx_cons_ptr;
	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
		hw_cons++;

	sw_cons = cp->rx_cons;
	while (sw_cons != hw_cons) {
		u8 cqe_fp_flags;

		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
			    cmd == RAMROD_CMD_ID_ETH_HALT)
				comp++;
		}
		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
	}
	return comp;
}
static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons, tx_cons;
	int comp = 0;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	rx_cons = *cp->rx_cons_ptr;
	tx_cons = *cp->tx_cons_ptr;
	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			comp = cnic_l2_completion(cp);

		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;

		if (cp->udev)
			uio_event_notify(&cp->udev->cnic_uinfo);
	}
	if (comp)
		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}
static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
	int kcqe_cnt;

	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != *cp->kcq1.status_idx_ptr) {
			status_idx = (u16) *cp->kcq1.status_idx_ptr;
			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
		} else
			break;
	}

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);

	cnic_chk_pkt_rings(cp);

	return status_idx;
}
static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		struct status_block *sblk = status_blk;

		return sblk->status_idx;
	}

	return cnic_service_bnx2_queues(dev);
}
static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;

	cp->last_status_idx = cnic_service_bnx2_queues(dev);

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
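/* Common interrupt body: prefetch the status block and the next KCQ entry,
 * then hand the real work off to the tasklet.
 */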
static void cnic_doirq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;

		prefetch(cp->status_blk.gen);
		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

		tasklet_schedule(&cp->cnic_irq_task);
	}
}
static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;

	if (cp->ack_int)
		cp->ack_int(dev);

	cnic_doirq(dev);

	return IRQ_HANDLED;
}
static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}
static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
			    u16 index, u8 op, u8 update)
{
	struct igu_regular cmd_data;
	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;

	cmd_data.sb_id_and_flags =
		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
		(update << IGU_REGULAR_BUPDATE_SHIFT) |
		(op << IGU_REGULAR_ENABLE_INT_SHIFT);

	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
}
static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
			   IGU_INT_DISABLE, 0);
}

static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
			IGU_INT_DISABLE, 0);
}
static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	u32 last_status = *info->status_idx_ptr;
	int kcqe_cnt;

	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that sblk fields can change. */
		barrier();
		if (last_status == *info->status_idx_ptr)
			break;

		last_status = *info->status_idx_ptr;
	}
	return last_status;
}
static void cnic_service_bnx2x_bh(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return;

	status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
	if (BNX2X_CHIP_IS_E2(cp->chip_id))
		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
				status_idx, IGU_INT_ENABLE, 1);
	else
		cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
				   status_idx, IGU_INT_ENABLE, 1);
}
static int cnic_service_bnx2x(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct cnic_local *cp = dev->cnic_priv;

	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		cnic_doirq(dev);

	cnic_chk_pkt_rings(cp);

	return 0;
}
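/* Stop all registered ULPs.  ULP_F_CALL_PENDING is set under cnic_lock so
 * that an unregistering ULP can wait for an in-progress cnic_stop() upcall
 * to finish.
 */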
static void cnic_ulp_stop(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cp->ulp_ops[if_type];
		if (!ulp_ops) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}
static void cnic_ulp_start(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cp->ulp_ops[if_type];
		if (!ulp_ops || !ulp_ops->cnic_start) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_start(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}
static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
	struct cnic_dev *dev = data;

	switch (info->cmd) {
	case CNIC_CTL_STOP_CMD:
		cnic_hold(dev);

		cnic_ulp_stop(dev);
		cnic_stop_hw(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_START_CMD:
		cnic_hold(dev);

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_COMPLETION_CMD: {
		u32 cid = BNX2X_SW_CID(info->data.comp.cid);
		u32 l5_cid;
		struct cnic_local *cp = dev->cnic_priv;

		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

			ctx->wait_cond = 1;
			wake_up(&ctx->waitq);
		}
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
static void cnic_ulp_init(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl[i];
		if (!ulp_ops || !ulp_ops->cnic_init) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_init(dev);

		ulp_put(ulp_ops);
	}
}
static void cnic_ulp_exit(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl[i];
		if (!ulp_ops || !ulp_ops->cnic_exit) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_exit(dev);

		ulp_put(ulp_ops);
	}
}
static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_start = DEF_IPID_START;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_upload *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->pg_cid;

	return dev->submit_kwqes(dev, wqes, 1);
}
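/* Build and submit the TCP connect request.  An IPv4 connect uses two KWQEs
 * (CONNECT1 and CONNECT3); IPv6 adds a CONNECT2 KWQE carrying the upper
 * 96 bits of the addresses, for three KWQEs total.
 */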
static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}
static int cnic_cm_close_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_close_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_abort_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_reset_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
			  u32 l5_cid, struct cnic_sock **csk, void *context)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_sock *csk1;

	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return -EINVAL;

	if (cp->ctx_tbl) {
		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			return -EAGAIN;
	}

	csk1 = &cp->csk_tbl[l5_cid];
	if (atomic_read(&csk1->ref_count))
		return -EAGAIN;

	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
		return -EBUSY;

	csk1->dev = dev;
	csk1->cid = cid;
	csk1->l5_cid = l5_cid;
	csk1->ulp_type = ulp_type;
	csk1->context = context;

	csk1->ka_timeout = DEF_KA_TIMEOUT;
	csk1->ka_interval = DEF_KA_INTERVAL;
	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
	csk1->tos = DEF_TOS;
	csk1->ttl = DEF_TTL;
	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
	csk1->rcv_buf = DEF_RCV_BUF;
	csk1->snd_buf = DEF_SND_BUF;
	csk1->seed = DEF_SEED;

	*csk = csk1;
	return 0;
}
static void cnic_cm_cleanup(struct cnic_sock *csk)
{
	if (csk->src_port) {
		struct cnic_dev *dev = csk->dev;
		struct cnic_local *cp = dev->cnic_priv;

		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
		csk->src_port = 0;
	}
}

static void cnic_close_conn(struct cnic_sock *csk)
{
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
		cnic_cm_upload_pg(csk);
		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	}
	cnic_cm_cleanup(csk);
}

static int cnic_cm_destroy(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	csk_hold(csk);
	clear_bit(SK_F_INUSE, &csk->flags);
	smp_mb__after_clear_bit();
	while (atomic_read(&csk->ref_count) != 1)
		msleep(1);
	cnic_cm_cleanup(csk);

	csk->flags = 0;
	csk_put(csk);
	return 0;
}
static inline u16 cnic_get_vlan(struct net_device *dev,
				struct net_device **vlan_dev)
{
	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		*vlan_dev = vlan_dev_real_dev(dev);
		return vlan_dev_vlan_id(dev);
	}
	*vlan_dev = dev;
	return 0;
}
static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_INET)
	struct flowi fl;
	int err;
	struct rtable *rt;

	memset(&fl, 0, sizeof(fl));
	fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;

	err = ip_route_output_key(&init_net, &rt, &fl);
	if (!err)
		*dst = &rt->dst;
	return err;
#else
	return -ENETUNREACH;
#endif
}
static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
	if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
		fl.oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl);
	if (*dst)
		return 0;
#endif

	return -ENETUNREACH;
}
static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
					   int ulp_type)
{
	struct cnic_dev *dev = NULL;
	struct dst_entry *dst;
	struct net_device *netdev = NULL;
	int err = -ENETUNREACH;

	if (dst_addr->sin_family == AF_INET)
		err = cnic_get_v4_route(dst_addr, &dst);
	else if (dst_addr->sin_family == AF_INET6) {
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *) dst_addr;

		err = cnic_get_v6_route(dst_addr6, &dst);
	} else
		return NULL;

	if (err)
		return NULL;

	if (!dst->dev)
		goto done;

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);

done:
	dst_release(dst);
	if (dev)
		cnic_put(dev);
	return dev;
}
static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}
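/* Resolve the route and local port for a new connection.  The MTU and VLAN
 * are taken from the route if it resolves to our own netdev.  A caller-
 * supplied local port in the CNIC range is reserved in csk_port_tbl;
 * otherwise a free port is allocated from the table.
 */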
static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	int is_v6, rc = 0;
	struct dst_entry *dst = NULL;
	struct net_device *realdev;
	__be16 local_port;
	u32 port_id;

	if (saddr->local.v6.sin6_family == AF_INET6 &&
	    saddr->remote.v6.sin6_family == AF_INET6)
		is_v6 = 1;
	else if (saddr->local.v4.sin_family == AF_INET &&
		 saddr->remote.v4.sin_family == AF_INET)
		is_v6 = 0;
	else
		return -EINVAL;

	clear_bit(SK_F_IPV6, &csk->flags);

	if (is_v6) {
		set_bit(SK_F_IPV6, &csk->flags);
		cnic_get_v6_route(&saddr->remote.v6, &dst);

		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
		       sizeof(struct in6_addr));
		csk->dst_port = saddr->remote.v6.sin6_port;
		local_port = saddr->local.v6.sin6_port;

	} else {
		cnic_get_v4_route(&saddr->remote.v4, &dst);

		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
		csk->dst_port = saddr->remote.v4.sin_port;
		local_port = saddr->local.v4.sin_port;
	}

	csk->vlan_id = 0;
	csk->mtu = dev->netdev->mtu;
	if (dst && dst->dev) {
		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
		if (realdev == dev->netdev) {
			csk->vlan_id = vlan;
			csk->mtu = dst_mtu(dst);
		}
	}

	port_id = be16_to_cpu(local_port);
	if (port_id >= CNIC_LOCAL_PORT_MIN &&
	    port_id < CNIC_LOCAL_PORT_MAX) {
		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
			port_id = 0;
	} else
		port_id = 0;

	if (!port_id) {
		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
		if (port_id == -1) {
			rc = -ENOMEM;
			goto err_out;
		}
		local_port = cpu_to_be16(port_id);
	}
	csk->src_port = local_port;

err_out:
	dst_release(dst);
	return rc;
}
static void cnic_init_csk_state(struct cnic_sock *csk)
{
	csk->state = 0;
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
	clear_bit(SK_F_CLOSING, &csk->flags);
}
static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	int err = 0;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
		return -EINVAL;

	cnic_init_csk_state(csk);

	err = cnic_get_route(csk, saddr);
	if (err)
		goto err_out;

	err = cnic_resolve_addr(csk, saddr);
	if (!err)
		return 0;

err_out:
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	return err;
}
static int cnic_cm_abort(struct cnic_sock *csk)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_abort_prep(csk))
		return cnic_cm_abort_req(csk);

	/* Getting here means that we haven't started connect, or
	 * connect was not successful.
	 */

	cp->close_conn(csk, opcode);
	if (csk->state != opcode)
		return -EALREADY;

	return 0;
}
static int cnic_cm_close(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_close_prep(csk)) {
		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		return cnic_cm_close_req(csk);
	} else {
		return -EALREADY;
	}
	return 0;
}
static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
			   u8 opcode)
{
	struct cnic_ulp_ops *ulp_ops;
	int ulp_type = csk->ulp_type;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (ulp_ops) {
		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
			ulp_ops->cm_connect_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			ulp_ops->cm_close_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
			ulp_ops->cm_remote_abort(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
			ulp_ops->cm_abort_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
			ulp_ops->cm_remote_close(csk);
	}
	rcu_read_unlock();
}
static int cnic_cm_set_pg(struct cnic_sock *csk)
{
	if (cnic_offld_prep(csk)) {
		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			cnic_cm_update_pg(csk);
		else
			cnic_cm_offload_pg(csk);
	}

	return 0;
}
static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 l5_cid = kcqe->pg_host_opaque;
	u8 opcode = kcqe->op_code;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];

	csk_hold(csk);
	if (!cnic_in_use(csk))
		goto done;

	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		goto done;
	}
	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk,
			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		goto done;
	}

	csk->pg_cid = kcqe->pg_cid;
	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	cnic_cm_conn_req(csk);

done:
	csk_put(csk);
}
static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
	u8 opcode = l4kcqe->op_code;
	u32 l5_cid;
	struct cnic_sock *csk;

	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		cnic_cm_process_offld_pg(dev, l4kcqe);
		return;
	}

	l5_cid = l4kcqe->conn_id;
	if (opcode & 0x80)
		l5_cid = l4kcqe->cid;
	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return;

	csk = &cp->csk_tbl[l5_cid];
	csk_hold(csk);

	if (!cnic_in_use(csk)) {
		csk_put(csk);
		return;
	}

	switch (opcode) {
	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
		if (l4kcqe->status != 0) {
			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
			cnic_cm_upcall(cp, csk,
				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		}
		break;
	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
		if (l4kcqe->status == 0)
			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);

		smp_mb__before_clear_bit();
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		cp->close_conn(csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
		cnic_cm_upcall(cp, csk, opcode);
		break;
	}
	csk_put(csk);
}

static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
	struct cnic_dev *dev = data;
	int i;

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);
}
static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes		= cnic_cm_indicate_kcqe,
};
static void cnic_cm_free_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	kfree(cp->csk_tbl);
	cp->csk_tbl = NULL;
	cnic_free_id_tbl(&cp->csk_port_tbl);
}

static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;

	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
			     CNIC_LOCAL_PORT_MIN)) {
		cnic_cm_free_mem(dev);
		return -ENOMEM;
	}
	return 0;
}
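/* Decide whether a close/reset event terminates the connection.  Returns 1
 * and sets SK_F_CLOSING if the event matches what the socket is waiting
 * for.
 */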
static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		/* Unsolicited RESET_COMP or RESET_RECEIVED */
		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
		csk->state = opcode;
	}

	/* 1. If event opcode matches the expected event in csk->state
	 * 2. If the expected event is CLOSE_COMP, we accept any event
	 * 3. If the expected event is 0, meaning the connection was never
	 *    established, we accept the opcode from cm_abort.
	 */
	if (opcode == csk->state || csk->state == 0 ||
	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
			if (csk->state == 0)
				csk->state = opcode;
			return 1;
		}
	}
	return 0;
}
static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
		cnic_cm_upcall(cp, csk, opcode);
		return;
	}

	clear_bit(SK_F_CONNECT_START, &csk->flags);
	cnic_close_conn(csk);
	csk->state = opcode;
	cnic_cm_upcall(cp, csk, opcode);
}
static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	get_random_bytes(&seed, 4);
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}
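/* On bnx2x, tearing down an offloaded connection is a ramrod sequence:
 * RESET/CLOSE completion -> SEARCHER_DELETE -> TERMINATE_OFFLOAD.  Each
 * completion re-enters this function with the next command until the
 * connection is fully closed and the ULP is notified.
 */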
static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
	union l5cm_specific_data l5_data;
	u32 cmd = 0;
	int close_complete = 0;

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		if (cnic_ready_to_close(csk, opcode)) {
			if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
			else
				close_complete = 1;
		}
		break;
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
		break;
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		close_complete = 1;
		break;
	}
	if (cmd) {
		memset(&l5_data, 0, sizeof(l5_data));

		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
				    &l5_data);
	} else if (close_complete) {
		ctx->timestamp = jiffies;
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, csk->state);
	}
}
static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	if (!cp->ctx_tbl)
		return;

	if (!netif_running(dev->netdev))
		return;

	for (i = 0; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];

		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			msleep(10);

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			netdev_warn(dev->netdev, "CID %x not deleted\n",
				    ctx->cid);
	}

	cancel_delayed_work(&cp->delete_task);
	flush_workqueue(cnic_wq);

	if (atomic_read(&cp->iscsi_conn) != 0)
		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
			    atomic_read(&cp->iscsi_conn));
}
static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u32 port = CNIC_PORT(cp);

	cnic_init_bnx2x_mac(dev);
	cnic_bnx2x_set_tcp_timestamp(dev, 1);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);

	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
		DEF_MAX_DA_COUNT);

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
		DEF_MAX_CWND);
	return 0;
}
static void cnic_delete_task(struct work_struct *work)
{
	struct cnic_local *cp;
	struct cnic_dev *dev;
	u32 i;
	int need_resched = 0;

	cp = container_of(work, struct cnic_local, delete_task.work);
	dev = cp->dev;

	for (i = 0; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];

		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
			need_resched = 1;
			continue;
		}

		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		cnic_bnx2x_destroy_ramrod(dev, i);

		cnic_free_bnx2x_conn_resc(dev, i);
		if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
			atomic_dec(&cp->iscsi_conn);

		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

	if (need_resched)
		queue_delayed_work(cnic_wq, &cp->delete_task,
				   msecs_to_jiffies(10));

}
static int cnic_cm_open(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int err;

	err = cnic_cm_alloc_mem(dev);
	if (err)
		return err;

	err = cp->start_cm(dev);

	if (err)
		goto err_out;

	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);

	dev->cm_create = cnic_cm_create;
	dev->cm_destroy = cnic_cm_destroy;
	dev->cm_connect = cnic_cm_connect;
	dev->cm_abort = cnic_cm_abort;
	dev->cm_close = cnic_cm_close;
	dev->cm_select_dev = cnic_cm_select_dev;

	cp->ulp_handle[CNIC_ULP_L4] = dev;
	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
	return 0;

err_out:
	cnic_cm_free_mem(dev);
	return err;
}
static int cnic_cm_shutdown(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	cp->stop_cm(dev);

	if (!cp->csk_tbl)
		return 0;

	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
		struct cnic_sock *csk = &cp->csk_tbl[i];

		clear_bit(SK_F_INUSE, &csk->flags);
		cnic_cm_cleanup(csk);
	}
	cnic_cm_free_mem(dev);

	return 0;
}
static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
	u32 cid_addr;
	int i;

	cid_addr = GET_CID_ADDR(cid);

	for (i = 0; i < CTX_SIZE; i += 4)
		cnic_ctx_wr(dev, cid_addr, i, 0);
}
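/* The 5709 keeps its context memory in host pages.  Program each page's DMA
 * address into the context host page table and poll for the write request
 * bit to clear.
 */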
static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret = 0, i;
	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;

	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		return 0;

	for (i = 0; i < cp->ctx_blks; i++) {
		int j;
		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
		u32 val;

		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);

		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) cp->ctx_arr[i].mapping >> 32);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
static void cnic_free_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		cp->disable_int_sync(dev);
		tasklet_kill(&cp->cnic_irq_task);
		free_irq(ethdev->irq_arr[0].vector, dev);
	}
}

static int cnic_request_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
	if (err)
		tasklet_disable(&cp->cnic_irq_task);

	return err;
}
static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		int i = 0;
		int sblk_num = cp->status_blk_num;
		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);

		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
			     (unsigned long) dev);
		err = cnic_request_irq(dev);
		if (err)
			return err;

		while (cp->status_blk.bnx2->status_completion_producer_index &&
		       i < 10) {
			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
				1 << (11 + sblk_num));
			udelay(10);
			i++;
			barrier();
		}
		if (cp->status_blk.bnx2->status_completion_producer_index) {
			cnic_free_irq(dev);
			goto failed;
		}

	} else {
		struct status_block *sblk = cp->status_blk.gen;
		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
		int i = 0;

		while (sblk->status_completion_producer_index && i < 10) {
			CNIC_WR(dev, BNX2_HC_COMMAND,
				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
			udelay(10);
			i++;
			barrier();
		}
		if (sblk->status_completion_producer_index)
			goto failed;

	}
	return 0;

failed:
	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
	return -EBUSY;
}
static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
	synchronize_irq(ethdev->irq_arr[0].vector);
}
static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, tx_cid, sb_id;
	u32 val, offset0, offset1, offset2, offset3;
	int i;
	struct tx_bd *txbd;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct status_block *s_blk = cp->status_blk.gen;

	sb_id = cp->status_blk_num;
	tx_cid = 20;
	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		tx_cid = TX_TSS_CID + sb_id - 1;
		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
			(TX_TSS_CID << 7));
		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
	}
	cp->tx_cons = *cp->tx_cons_ptr;

	cid_addr = GET_CID_ADDR(tx_cid);
	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;

		for (i = 0; i < PHY_CTX_SIZE; i += 4)
			cnic_ctx_wr(dev, cid_addr2, i, 0);

		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		cnic_init_context(dev, tx_cid);
		cnic_init_context(dev, tx_cid + 1);

		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	cnic_ctx_wr(dev, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	cnic_ctx_wr(dev, cid_addr, offset1, val);

	txbd = (struct tx_bd *) udev->l2_ring;

	buf_map = udev->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) ring_map >> 32;
	cnic_ctx_wr(dev, cid_addr, offset2, val);
	txbd->tx_bd_haddr_hi = val;

	val = (u64) ring_map & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, offset3, val);
	txbd->tx_bd_haddr_lo = val;
}
static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, sb_id, val, coal_reg, coal_val;
	int i;
	struct rx_bd *rxbd;
	struct status_block *s_blk = cp->status_blk.gen;
	dma_addr_t ring_map = udev->l2_ring_map;

	sb_id = cp->status_blk_num;
	cnic_init_context(dev, 2);
	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
	coal_reg = BNX2_HC_COMMAND;
	coal_val = CNIC_RD(dev, coal_reg);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
		coal_reg = BNX2_HC_COALESCE_NOW;
		coal_val = 1 << (11 + sb_id);
	}
	i = 0;
	while (!(*cp->rx_cons_ptr != 0) && i < 10) {
		CNIC_WR(dev, coal_reg, coal_val);
		udelay(10);
		i++;
		barrier();
	}
	cp->rx_cons = *cp->rx_cons_ptr;

	cid_addr = GET_CID_ADDR(2);
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	if (sb_id == 0)
		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
	else
		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

	rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE);
	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->rx_bd_len = cp->l2_single_buf_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
	rxbd->rx_bd_haddr_hi = val;

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
	rxbd->rx_bd_haddr_lo = val;

	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}
static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct kwqe *wqes[1], l2kwqe;

	memset(&l2kwqe, 0, sizeof(l2kwqe));
	wqes[0] = &l2kwqe;
	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
			       KWQE_OPCODE_SHIFT) | 2;
	dev->submit_kwqes(dev, wqes, 1);
}
static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}
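/* Bring up the bnx2 kernel queues: program the MQ kernel bypass block size,
 * set up the KWQ and KCQ contexts (page tables, producer indexes, status
 * block association), then initialize the L2 rings and the IRQ.
 */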
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk.gen;
	u32 val, kcq_cid_addr, kwq_cid_addr;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (BCM_PAGE_BITS > 12)
		val |= (12 - 8) << 4;
	else
		val |= (BCM_PAGE_BITS - 8) << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq1.sw_prod_idx = 0;
	cp->kcq1.hw_prod_idx_ptr =
		(u16 *) &sblk->status_completion_producer_index;

	cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq1.dma.pgtbl_map;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *msblk = cp->status_blk.bnx2;
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->kcq1.hw_prod_idx_ptr =
			(u16 *) &msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
		cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver post an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors polls the
	 * doorbell for a non zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}
static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}
static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		err = cnic_request_irq(dev);

	return err;
}
static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
						u16 sb_id, u8 sb_index,
						u8 disable)
{

	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = CNIC_RD16(dev, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
		HC_INDEX_DATA_HC_ENABLED);
	CNIC_WR16(dev, addr, flags);
}
static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
			offsetof(struct hc_index_data, timeout), 64 / 12);
	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int port = CNIC_PORT(cp);
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	u32 val;

	memset(txbd, 0, BCM_PAGE_SIZE);

	buf_map = udev->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data = (UNICAST_ADDRESS <<
			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	}

	val = (u64) ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

	val = (u64) ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

	/* reset xstorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		val = BAR_XSTRORM_INTMEM +
		      XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);
	}

	cp->tx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
				BCM_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	int port = CNIC_PORT(cp);
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
	u32 val;
	dma_addr_t ring_map = udev->l2_ring_map;

	/* General data */
	data->general.client_id = cli;
	data->general.statistics_en_flg = 1;
	data->general.statistics_counter_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}

	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);
	data->rx.bd_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);
	data->rx.bd_page_base.lo = cpu_to_le32(val);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);
	data->rx.cqe_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);
	data->rx.cqe_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->rx.client_qzone_id = cl_qzone_id;
	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
	data->rx.status_block_id = BNX2X_DEF_SB_ID;

	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
	data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);

	data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
	data->rx.outer_vlan_removal_enable_flg = 1;

	/* reset tstorm and ustorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		val = BAR_TSTRORM_INTMEM +
		      TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);

		val = BAR_USTRORM_INTMEM +
		      USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);
	}

	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
	cp->rx_cons = *cp->rx_cons_ptr;
}
static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
				     u32 lower_addr)
{
	u32 val;
	u8 mac[6];

	val = CNIC_RD(dev, upper_addr);

	mac[0] = (u8) (val >> 8);
	mac[1] = (u8) val;

	val = CNIC_RD(dev, lower_addr);

	mac[2] = (u8) (val >> 24);
	mac[3] = (u8) (val >> 16);
	mac[4] = (u8) (val >> 8);
	mac[5] = (u8) val;

	if (is_valid_ether_addr(mac)) {
		memcpy(dev->mac_addr, mac, 6);
		return 0;
	} else {
		return -EINVAL;
	}
}
*dev
)
4280 struct cnic_local
*cp
= dev
->cnic_priv
;
4281 u32 base
, base2
, addr
, addr1
, val
;
4282 int port
= CNIC_PORT(cp
);
4284 dev
->max_iscsi_conn
= 0;
4285 base
= CNIC_RD(dev
, MISC_REG_SHARED_MEM_ADDR
);
4289 base2
= CNIC_RD(dev
, (CNIC_PATH(cp
) ? MISC_REG_GENERIC_CR_1
:
4290 MISC_REG_GENERIC_CR_0
));
4291 addr
= BNX2X_SHMEM_ADDR(base
,
4292 dev_info
.port_hw_config
[port
].iscsi_mac_upper
);
4294 addr1
= BNX2X_SHMEM_ADDR(base
,
4295 dev_info
.port_hw_config
[port
].iscsi_mac_lower
);
4297 cnic_read_bnx2x_iscsi_mac(dev
, addr
, addr1
);
4299 addr
= BNX2X_SHMEM_ADDR(base
, validity_map
[port
]);
4300 val
= CNIC_RD(dev
, addr
);
4302 if (!(val
& SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT
)) {
4305 addr
= BNX2X_SHMEM_ADDR(base
,
4306 drv_lic_key
[port
].max_iscsi_init_conn
);
4307 val16
= CNIC_RD16(dev
, addr
);
4311 dev
->max_iscsi_conn
= val16
;
4313 if (BNX2X_CHIP_IS_E1H(cp
->chip_id
) || BNX2X_CHIP_IS_E2(cp
->chip_id
)) {
4314 int func
= CNIC_FUNC(cp
);
4317 if (BNX2X_SHMEM2_HAS(base2
, mf_cfg_addr
))
4318 mf_cfg_addr
= CNIC_RD(dev
, BNX2X_SHMEM2_ADDR(base2
,
4321 mf_cfg_addr
= base
+ BNX2X_SHMEM_MF_BLK_OFFSET
;
4323 if (BNX2X_CHIP_IS_E2(cp
->chip_id
)) {
4324 /* Must determine if the MF is SD vs SI mode */
4325 addr
= BNX2X_SHMEM_ADDR(base
,
4326 dev_info
.shared_feature_config
.config
);
4327 val
= CNIC_RD(dev
, addr
);
4328 if ((val
& SHARED_FEAT_CFG_FORCE_SF_MODE_MASK
) ==
4329 SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT
) {
4332 /* MULTI_FUNCTION_SI mode */
4333 addr
= BNX2X_MF_CFG_ADDR(mf_cfg_addr
,
4334 func_ext_config
[func
].func_cfg
);
4335 val
= CNIC_RD(dev
, addr
);
4336 if (!(val
& MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD
))
4337 dev
->max_iscsi_conn
= 0;
4339 addr
= BNX2X_MF_CFG_ADDR(mf_cfg_addr
,
4340 func_ext_config
[func
].
4341 iscsi_mac_addr_upper
);
4342 addr1
= BNX2X_MF_CFG_ADDR(mf_cfg_addr
,
4343 func_ext_config
[func
].
4344 iscsi_mac_addr_lower
);
4345 rc
= cnic_read_bnx2x_iscsi_mac(dev
, addr
,
4348 dev
->max_iscsi_conn
= 0;
4354 addr
= BNX2X_MF_CFG_ADDR(mf_cfg_addr
,
4355 func_mf_config
[func
].e1hov_tag
);
4357 val
= CNIC_RD(dev
, addr
);
4358 val
&= FUNC_MF_CFG_E1HOV_TAG_MASK
;
4359 if (val
!= FUNC_MF_CFG_E1HOV_TAG_DEFAULT
) {
4360 addr
= BNX2X_MF_CFG_ADDR(mf_cfg_addr
,
4361 func_mf_config
[func
].config
);
4362 val
= CNIC_RD(dev
, addr
);
4363 val
&= FUNC_MF_CFG_PROTOCOL_MASK
;
4364 if (val
!= FUNC_MF_CFG_PROTOCOL_ISCSI
)
4365 dev
->max_iscsi_conn
= 0;
4368 if (!is_valid_ether_addr(dev
->mac_addr
))
4369 dev
->max_iscsi_conn
= 0;
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int func = CNIC_FUNC(cp), ret, i;
	u32 pfid;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);

		if (!(val & 1))
			val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;

		if (val)
			cp->pfid = func >> 1;
		else
			cp->pfid = func & 0x6;
	} else {
		cp->pfid = func;
	}
	pfid = cp->pfid;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       cp->iscsi_start_cid);

	if (ret)
		return -ENOMEM;

	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;

	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
	cp->kcq1.sw_prod_idx = 0;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	} else {
		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}

	cnic_get_bnx2x_iscsi_info(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
		 HC_INDEX_ISCSI_EQ_CONS);

	for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
			cp->conn_buf_info.pgtbl[2 * i]);
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
			cp->conn_buf_info.pgtbl[(2 * i) + 1]);
	}

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	return 0;
}

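/* Ring bring-up.  bnx2 devices only need their TX/RX rings initialized;
 * bnx2x devices must additionally post a CLIENT_SETUP ramrod and poll
 * (bounded, roughly 10 ms) for the L2 completion before the ring
 * doorbell is enabled via cnic_ring_ctl().
 */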
static void cnic_init_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		u32 cl_qzone_id, type;
		struct client_init_ramrod_data *data;
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);

		off = BAR_USTRORM_INTMEM +
			(BNX2X_CHIP_IS_E2(cp->chip_id) ?
			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		data = udev->l2_buf;

		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;

		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
			 SPE_HDR_FUNCTION_ID);

		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			cid, type, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				   "iSCSI CLIENT_SETUP did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
		cnic_ring_ctl(dev, cid, cli, 1);
	}
}

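/* Ring tear-down, the inverse of cnic_init_rings().  On bnx2x this is a
 * three-step sequence: disable the ring, HALT the L2 client (again with
 * a bounded poll for the completion), then release the connection
 * context with a CFC_DEL ramrod.
 */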
static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		union l5cm_specific_data l5_data;
		int i;
		u32 type;

		cnic_ring_ctl(dev, cid, cli, 0);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
			cid, ETH_CONNECTION_TYPE, &l5_data);
		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				   "iSCSI CLIENT_HALT did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

		memset(&l5_data, 0, sizeof(l5_data));
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
			 SPE_HDR_FUNCTION_ID);
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
			cid, type, &l5_data);
		msleep(10);
	}
	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
}

static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}

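/* Common bring-up path shared by both chip families: pin the PCI device,
 * latch the status block from the ethdev IRQ table, then run the
 * chip-specific alloc_resc/start_hw hooks and open the connection
 * manager.  Failures unwind through err1 so nothing is leaked.
 */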
static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

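/* Chip-specific quiesce paths.  Both variants silence the hardware first
 * (interrupts, then the kernel work/completion queues) and free resources
 * last, so no completion can reference memory that is being released.
 */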
static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_free_irq(dev);
	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}

static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

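/* Final teardown.  The bounded poll (10 x 100 ms) lets outstanding
 * cnic_hold() references drain before the structure is freed; a
 * lingering reference is logged rather than waited on forever.
 */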
static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

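/* The cnic_dev and its cnic_local private area come from one kzalloc():
 * cnic_priv simply points just past the end of struct cnic_dev, so the
 * single kfree() in cnic_free_dev() releases both.
 */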
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		netdev_err(dev, "allocate dev struct failure\n");
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
		u8 rev;

		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
		if (rev < 0x10) {
			pci_dev_put(pdev);
			goto cnic_err;
		}
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	cp->next_idx = cnic_bnx2_next_idx;
	cp->hw_idx = cnic_bnx2_hw_idx;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2x_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2x_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2(cp->chip_id))
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
	else
		cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;
	cp->next_idx = cnic_bnx2x_next_idx;
	cp->hw_idx = cnic_bnx2x_hw_idx;
	return cdev;
}

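/* CNIC-capable netdevs are identified by their ethtool driver name;
 * only "bnx2" and "bnx2x" devices get a cnic_dev created and added to
 * cnic_dev_list under cnic_dev_lock.
 */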
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

/**
 * netdev event handler
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

static void cnic_release(void)
{
	struct cnic_dev *dev;
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

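/* Module init.  Registering the netdevice notifier replays
 * NETDEV_REGISTER/NETDEV_UP for already-present netdevs, so devices may
 * be picked up before the workqueue exists; both error paths therefore
 * call cnic_release() to undo any such registration.
 */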
static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);