/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"
#define DRV_MODULE_NAME		"cnic"
#define PFX DRV_MODULE_NAME	": "

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);
static LIST_HEAD(cnic_dev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);
static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};
static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (cp->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	cp->uio_dev = iminor(inode);

	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}
static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	cnic_shutdown_rings(dev);

	cp->uio_dev = -1;
	return 0;
}
static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}
static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}
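/*
 * The helpers below do not access the NIC registers directly.  Each one
 * fills in a struct drv_ctl_info command (context write, context-table
 * write, L2 ring start/stop, indirect register I/O, completion count) and
 * passes it to the parent ethernet driver through ethdev->drv_ctl(), which
 * owns the actual hardware access.
 */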
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}
static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);

	return io->data;
}
static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}
static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = DRV_CTL_COMPLETION_CMD;
	info.data.comp.comp_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}
static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;

	if (cp->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	rcu_read_lock();
	ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
	if (ulp_ops)
		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
	rcu_read_unlock();
	return 0;
}
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		csk = &cp->csk_tbl[l5_cid];

		if (cnic_in_use(csk)) {
			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));
			if (is_valid_ether_addr(csk->ha))
				cnic_cm_set_pg(csk);
		}
		rc = 0;
	}
	}

	return rc;
}
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}
static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}
static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}
static void cnic_uio_stop(void)
{
	struct cnic_dev *dev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (cp->cnic_uinfo)
			cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	}
	read_unlock(&cnic_dev_lock);
}
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
				    "been registered\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	read_unlock(&cnic_dev_lock);
	rtnl_unlock();

	return 0;
}
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl[ulp_type];
	if (!ulp_ops) {
		printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
				    "been registered\n", ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
			       "still has devices registered\n", ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_uio_stop();

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		printk(KERN_WARNING PFX "%s: Failed waiting for ref count to go"
		       " to zero.\n", dev->netdev->name);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}
static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
		       "has not been registered\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
		       "been registered to this device\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		printk(KERN_ERR PFX "cnic_unregister_device: device not "
		       "registered to this ulp type %d\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		printk(KERN_WARNING PFX "%s: Failed waiting for ULP up call"
		       " to complete.\n", dev->netdev->name);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}
static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}
static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}
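/*
 * Note: cnic_alloc_new_id() below advances id_tbl->next with
 * "(id + 1) & (id_tbl->max - 1)", so the wrap-around search only works
 * when the size passed to cnic_init_id_tbl() is a power of two.
 */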
/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
	}

	spin_unlock(&id_tbl->lock);

	return id;
}
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
}
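/*
 * The two page-table setup routines below fill the same 8-byte-per-page
 * table but with opposite word order: cnic_setup_page_tbl() stores the high
 * 32 bits of each DMA address first (big endian layout, the bnx2 path),
 * while cnic_setup_page_tbl_le() stores the low 32 bits first (little
 * endian layout, the bnx2x path).
 */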
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}
static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = dma->pg_map_arr[i] & 0xffffffff;
		page_table++;
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}
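/*
 * cnic_alloc_dma() below makes a single kzalloc'd block that holds both the
 * page pointer array and the dma_addr_t array (pg_map_arr starts right after
 * the last pointer), then allocates each BCM_PAGE_SIZE page with
 * dma_alloc_coherent().  When use_pg_tbl is nonzero it also allocates a page
 * table and fills it through cp->setup_pgtbl(), as the KWQ/KCQ allocations
 * later in this file do.
 */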
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}
754 static void cnic_free_context(struct cnic_dev
*dev
)
756 struct cnic_local
*cp
= dev
->cnic_priv
;
759 for (i
= 0; i
< cp
->ctx_blks
; i
++) {
760 if (cp
->ctx_arr
[i
].ctx
) {
761 dma_free_coherent(&dev
->pcidev
->dev
, cp
->ctx_blk_size
,
763 cp
->ctx_arr
[i
].mapping
);
764 cp
->ctx_arr
[i
].ctx
= NULL
;
769 static void cnic_free_resc(struct cnic_dev
*dev
)
771 struct cnic_local
*cp
= dev
->cnic_priv
;
774 if (cp
->cnic_uinfo
) {
775 while (cp
->uio_dev
!= -1 && i
< 15) {
779 uio_unregister_device(cp
->cnic_uinfo
);
780 kfree(cp
->cnic_uinfo
);
781 cp
->cnic_uinfo
= NULL
;
785 dma_free_coherent(&dev
->pcidev
->dev
, cp
->l2_buf_size
,
786 cp
->l2_buf
, cp
->l2_buf_map
);
791 dma_free_coherent(&dev
->pcidev
->dev
, cp
->l2_ring_size
,
792 cp
->l2_ring
, cp
->l2_ring_map
);
796 cnic_free_context(dev
);
801 cnic_free_dma(dev
, &cp
->gbl_buf_info
);
802 cnic_free_dma(dev
, &cp
->conn_buf_info
);
803 cnic_free_dma(dev
, &cp
->kwq_info
);
804 cnic_free_dma(dev
, &cp
->kwq_16_data_info
);
805 cnic_free_dma(dev
, &cp
->kcq_info
);
806 kfree(cp
->iscsi_tbl
);
807 cp
->iscsi_tbl
= NULL
;
811 cnic_free_id_tbl(&cp
->cid_tbl
);
814 static int cnic_alloc_context(struct cnic_dev
*dev
)
816 struct cnic_local
*cp
= dev
->cnic_priv
;
818 if (CHIP_NUM(cp
) == CHIP_NUM_5709
) {
821 cp
->ctx_blk_size
= BCM_PAGE_SIZE
;
822 cp
->cids_per_blk
= BCM_PAGE_SIZE
/ 128;
823 arr_size
= BNX2_MAX_CID
/ cp
->cids_per_blk
*
824 sizeof(struct cnic_ctx
);
825 cp
->ctx_arr
= kzalloc(arr_size
, GFP_KERNEL
);
826 if (cp
->ctx_arr
== NULL
)
830 for (i
= 0; i
< 2; i
++) {
831 u32 j
, reg
, off
, lo
, hi
;
834 off
= BNX2_PG_CTX_MAP
;
836 off
= BNX2_ISCSI_CTX_MAP
;
838 reg
= cnic_reg_rd_ind(dev
, off
);
841 for (j
= lo
; j
< hi
; j
+= cp
->cids_per_blk
, k
++)
842 cp
->ctx_arr
[k
].cid
= j
;
846 if (cp
->ctx_blks
>= (BNX2_MAX_CID
/ cp
->cids_per_blk
)) {
851 for (i
= 0; i
< cp
->ctx_blks
; i
++) {
853 dma_alloc_coherent(&dev
->pcidev
->dev
,
855 &cp
->ctx_arr
[i
].mapping
,
857 if (cp
->ctx_arr
[i
].ctx
== NULL
)
864 static int cnic_alloc_l2_rings(struct cnic_dev
*dev
, int pages
)
866 struct cnic_local
*cp
= dev
->cnic_priv
;
868 cp
->l2_ring_size
= pages
* BCM_PAGE_SIZE
;
869 cp
->l2_ring
= dma_alloc_coherent(&dev
->pcidev
->dev
, cp
->l2_ring_size
,
871 GFP_KERNEL
| __GFP_COMP
);
875 cp
->l2_buf_size
= (cp
->l2_rx_ring_size
+ 1) * cp
->l2_single_buf_size
;
876 cp
->l2_buf_size
= PAGE_ALIGN(cp
->l2_buf_size
);
877 cp
->l2_buf
= dma_alloc_coherent(&dev
->pcidev
->dev
, cp
->l2_buf_size
,
879 GFP_KERNEL
| __GFP_COMP
);
886 static int cnic_alloc_uio(struct cnic_dev
*dev
) {
887 struct cnic_local
*cp
= dev
->cnic_priv
;
888 struct uio_info
*uinfo
;
891 uinfo
= kzalloc(sizeof(*uinfo
), GFP_ATOMIC
);
895 uinfo
->mem
[0].addr
= dev
->netdev
->base_addr
;
896 uinfo
->mem
[0].internal_addr
= dev
->regview
;
897 uinfo
->mem
[0].size
= dev
->netdev
->mem_end
- dev
->netdev
->mem_start
;
898 uinfo
->mem
[0].memtype
= UIO_MEM_PHYS
;
900 if (test_bit(CNIC_F_BNX2_CLASS
, &dev
->flags
)) {
901 uinfo
->mem
[1].addr
= (unsigned long) cp
->status_blk
& PAGE_MASK
;
902 if (cp
->ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
)
903 uinfo
->mem
[1].size
= BNX2_SBLK_MSIX_ALIGN_SIZE
* 9;
905 uinfo
->mem
[1].size
= BNX2_SBLK_MSIX_ALIGN_SIZE
;
907 uinfo
->name
= "bnx2_cnic";
908 } else if (test_bit(CNIC_F_BNX2X_CLASS
, &dev
->flags
)) {
909 uinfo
->mem
[1].addr
= (unsigned long) cp
->bnx2x_def_status_blk
&
911 uinfo
->mem
[1].size
= sizeof(struct host_def_status_block
);
913 uinfo
->name
= "bnx2x_cnic";
916 uinfo
->mem
[1].memtype
= UIO_MEM_LOGICAL
;
918 uinfo
->mem
[2].addr
= (unsigned long) cp
->l2_ring
;
919 uinfo
->mem
[2].size
= cp
->l2_ring_size
;
920 uinfo
->mem
[2].memtype
= UIO_MEM_LOGICAL
;
922 uinfo
->mem
[3].addr
= (unsigned long) cp
->l2_buf
;
923 uinfo
->mem
[3].size
= cp
->l2_buf_size
;
924 uinfo
->mem
[3].memtype
= UIO_MEM_LOGICAL
;
926 uinfo
->version
= CNIC_MODULE_VERSION
;
927 uinfo
->irq
= UIO_IRQ_CUSTOM
;
929 uinfo
->open
= cnic_uio_open
;
930 uinfo
->release
= cnic_uio_close
;
934 ret
= uio_register_device(&dev
->pcidev
->dev
, uinfo
);
940 cp
->cnic_uinfo
= uinfo
;
944 static int cnic_alloc_bnx2_resc(struct cnic_dev
*dev
)
946 struct cnic_local
*cp
= dev
->cnic_priv
;
949 ret
= cnic_alloc_dma(dev
, &cp
->kwq_info
, KWQ_PAGE_CNT
, 1);
952 cp
->kwq
= (struct kwqe
**) cp
->kwq_info
.pg_arr
;
954 ret
= cnic_alloc_dma(dev
, &cp
->kcq_info
, KCQ_PAGE_CNT
, 1);
957 cp
->kcq
= (struct kcqe
**) cp
->kcq_info
.pg_arr
;
959 ret
= cnic_alloc_context(dev
);
963 ret
= cnic_alloc_l2_rings(dev
, 2);
967 ret
= cnic_alloc_uio(dev
);
978 static int cnic_alloc_bnx2x_context(struct cnic_dev
*dev
)
980 struct cnic_local
*cp
= dev
->cnic_priv
;
981 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
982 int ctx_blk_size
= cp
->ethdev
->ctx_blk_size
;
983 int total_mem
, blks
, i
, cid_space
;
985 if (BNX2X_ISCSI_START_CID
< ethdev
->starting_cid
)
988 cid_space
= MAX_ISCSI_TBL_SZ
+
989 (BNX2X_ISCSI_START_CID
- ethdev
->starting_cid
);
991 total_mem
= BNX2X_CONTEXT_MEM_SIZE
* cid_space
;
992 blks
= total_mem
/ ctx_blk_size
;
993 if (total_mem
% ctx_blk_size
)
996 if (blks
> cp
->ethdev
->ctx_tbl_len
)
999 cp
->ctx_arr
= kzalloc(blks
* sizeof(struct cnic_ctx
), GFP_KERNEL
);
1000 if (cp
->ctx_arr
== NULL
)
1003 cp
->ctx_blks
= blks
;
1004 cp
->ctx_blk_size
= ctx_blk_size
;
1005 if (BNX2X_CHIP_IS_E1H(cp
->chip_id
))
1008 cp
->ctx_align
= ctx_blk_size
;
1010 cp
->cids_per_blk
= ctx_blk_size
/ BNX2X_CONTEXT_MEM_SIZE
;
1012 for (i
= 0; i
< blks
; i
++) {
1013 cp
->ctx_arr
[i
].ctx
=
1014 dma_alloc_coherent(&dev
->pcidev
->dev
, cp
->ctx_blk_size
,
1015 &cp
->ctx_arr
[i
].mapping
,
1017 if (cp
->ctx_arr
[i
].ctx
== NULL
)
1020 if (cp
->ctx_align
&& cp
->ctx_blk_size
== ctx_blk_size
) {
1021 if (cp
->ctx_arr
[i
].mapping
& (cp
->ctx_align
- 1)) {
1022 cnic_free_context(dev
);
1023 cp
->ctx_blk_size
+= cp
->ctx_align
;
1032 static int cnic_alloc_bnx2x_resc(struct cnic_dev
*dev
)
1034 struct cnic_local
*cp
= dev
->cnic_priv
;
1035 int i
, j
, n
, ret
, pages
;
1036 struct cnic_dma
*kwq_16_dma
= &cp
->kwq_16_data_info
;
1038 cp
->iscsi_tbl
= kzalloc(sizeof(struct cnic_iscsi
) * MAX_ISCSI_TBL_SZ
,
1043 cp
->ctx_tbl
= kzalloc(sizeof(struct cnic_context
) *
1044 MAX_CNIC_L5_CONTEXT
, GFP_KERNEL
);
1048 for (i
= 0; i
< MAX_ISCSI_TBL_SZ
; i
++) {
1049 cp
->ctx_tbl
[i
].proto
.iscsi
= &cp
->iscsi_tbl
[i
];
1050 cp
->ctx_tbl
[i
].ulp_proto_id
= CNIC_ULP_ISCSI
;
1053 pages
= PAGE_ALIGN(MAX_CNIC_L5_CONTEXT
* CNIC_KWQ16_DATA_SIZE
) /
1056 ret
= cnic_alloc_dma(dev
, kwq_16_dma
, pages
, 0);
1060 n
= PAGE_SIZE
/ CNIC_KWQ16_DATA_SIZE
;
1061 for (i
= 0, j
= 0; i
< MAX_ISCSI_TBL_SZ
; i
++) {
1062 long off
= CNIC_KWQ16_DATA_SIZE
* (i
% n
);
1064 cp
->ctx_tbl
[i
].kwqe_data
= kwq_16_dma
->pg_arr
[j
] + off
;
1065 cp
->ctx_tbl
[i
].kwqe_data_mapping
= kwq_16_dma
->pg_map_arr
[j
] +
1068 if ((i
% n
) == (n
- 1))
1072 ret
= cnic_alloc_dma(dev
, &cp
->kcq_info
, KCQ_PAGE_CNT
, 0);
1075 cp
->kcq
= (struct kcqe
**) cp
->kcq_info
.pg_arr
;
1077 for (i
= 0; i
< KCQ_PAGE_CNT
; i
++) {
1078 struct bnx2x_bd_chain_next
*next
=
1079 (struct bnx2x_bd_chain_next
*)
1080 &cp
->kcq
[i
][MAX_KCQE_CNT
];
1083 if (j
>= KCQ_PAGE_CNT
)
1085 next
->addr_hi
= (u64
) cp
->kcq_info
.pg_map_arr
[j
] >> 32;
1086 next
->addr_lo
= cp
->kcq_info
.pg_map_arr
[j
] & 0xffffffff;
1089 pages
= PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS
*
1090 BNX2X_ISCSI_CONN_BUF_SIZE
) / PAGE_SIZE
;
1091 ret
= cnic_alloc_dma(dev
, &cp
->conn_buf_info
, pages
, 1);
1095 pages
= PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE
) / PAGE_SIZE
;
1096 ret
= cnic_alloc_dma(dev
, &cp
->gbl_buf_info
, pages
, 0);
1100 ret
= cnic_alloc_bnx2x_context(dev
);
1104 cp
->bnx2x_status_blk
= cp
->status_blk
;
1105 cp
->bnx2x_def_status_blk
= cp
->ethdev
->irq_arr
[1].status_blk
;
1107 memset(cp
->bnx2x_status_blk
, 0, sizeof(struct host_status_block
));
1109 cp
->l2_rx_ring_size
= 15;
1111 ret
= cnic_alloc_l2_rings(dev
, 4);
1115 ret
= cnic_alloc_uio(dev
);
1122 cnic_free_resc(dev
);
1126 static inline u32
cnic_kwq_avail(struct cnic_local
*cp
)
1128 return cp
->max_kwq_idx
-
1129 ((cp
->kwq_prod_idx
- cp
->kwq_con_idx
) & cp
->max_kwq_idx
);
1132 static int cnic_submit_bnx2_kwqes(struct cnic_dev
*dev
, struct kwqe
*wqes
[],
1135 struct cnic_local
*cp
= dev
->cnic_priv
;
1136 struct kwqe
*prod_qe
;
1137 u16 prod
, sw_prod
, i
;
1139 if (!test_bit(CNIC_F_CNIC_UP
, &dev
->flags
))
1140 return -EAGAIN
; /* bnx2 is down */
1142 spin_lock_bh(&cp
->cnic_ulp_lock
);
1143 if (num_wqes
> cnic_kwq_avail(cp
) &&
1144 !(cp
->cnic_local_flags
& CNIC_LCL_FL_KWQ_INIT
)) {
1145 spin_unlock_bh(&cp
->cnic_ulp_lock
);
1149 cp
->cnic_local_flags
&= ~CNIC_LCL_FL_KWQ_INIT
;
1151 prod
= cp
->kwq_prod_idx
;
1152 sw_prod
= prod
& MAX_KWQ_IDX
;
1153 for (i
= 0; i
< num_wqes
; i
++) {
1154 prod_qe
= &cp
->kwq
[KWQ_PG(sw_prod
)][KWQ_IDX(sw_prod
)];
1155 memcpy(prod_qe
, wqes
[i
], sizeof(struct kwqe
));
1157 sw_prod
= prod
& MAX_KWQ_IDX
;
1159 cp
->kwq_prod_idx
= prod
;
1161 CNIC_WR16(dev
, cp
->kwq_io_addr
, cp
->kwq_prod_idx
);
1163 spin_unlock_bh(&cp
->cnic_ulp_lock
);
1167 static void *cnic_get_kwqe_16_data(struct cnic_local
*cp
, u32 l5_cid
,
1168 union l5cm_specific_data
*l5_data
)
1170 struct cnic_context
*ctx
= &cp
->ctx_tbl
[l5_cid
];
1173 map
= ctx
->kwqe_data_mapping
;
1174 l5_data
->phy_address
.lo
= (u64
) map
& 0xffffffff;
1175 l5_data
->phy_address
.hi
= (u64
) map
>> 32;
1176 return ctx
->kwqe_data
;
1179 static int cnic_submit_kwqe_16(struct cnic_dev
*dev
, u32 cmd
, u32 cid
,
1180 u32 type
, union l5cm_specific_data
*l5_data
)
1182 struct cnic_local
*cp
= dev
->cnic_priv
;
1183 struct l5cm_spe kwqe
;
1184 struct kwqe_16
*kwq
[1];
1187 kwqe
.hdr
.conn_and_cmd_data
=
1188 cpu_to_le32(((cmd
<< SPE_HDR_CMD_ID_SHIFT
) |
1189 BNX2X_HW_CID(cid
, cp
->func
)));
1190 kwqe
.hdr
.type
= cpu_to_le16(type
);
1191 kwqe
.hdr
.reserved
= 0;
1192 kwqe
.data
.phy_address
.lo
= cpu_to_le32(l5_data
->phy_address
.lo
);
1193 kwqe
.data
.phy_address
.hi
= cpu_to_le32(l5_data
->phy_address
.hi
);
1195 kwq
[0] = (struct kwqe_16
*) &kwqe
;
1197 spin_lock_bh(&cp
->cnic_ulp_lock
);
1198 ret
= cp
->ethdev
->drv_submit_kwqes_16(dev
->netdev
, kwq
, 1);
1199 spin_unlock_bh(&cp
->cnic_ulp_lock
);
1207 static void cnic_reply_bnx2x_kcqes(struct cnic_dev
*dev
, int ulp_type
,
1208 struct kcqe
*cqes
[], u32 num_cqes
)
1210 struct cnic_local
*cp
= dev
->cnic_priv
;
1211 struct cnic_ulp_ops
*ulp_ops
;
1214 ulp_ops
= rcu_dereference(cp
->ulp_ops
[ulp_type
]);
1215 if (likely(ulp_ops
)) {
1216 ulp_ops
->indicate_kcqes(cp
->ulp_handle
[ulp_type
],
1222 static int cnic_bnx2x_iscsi_init1(struct cnic_dev
*dev
, struct kwqe
*kwqe
)
1224 struct cnic_local
*cp
= dev
->cnic_priv
;
1225 struct iscsi_kwqe_init1
*req1
= (struct iscsi_kwqe_init1
*) kwqe
;
1226 int func
= cp
->func
, pages
;
1229 cp
->num_iscsi_tasks
= req1
->num_tasks_per_conn
;
1230 cp
->num_ccells
= req1
->num_ccells_per_conn
;
1231 cp
->task_array_size
= BNX2X_ISCSI_TASK_CONTEXT_SIZE
*
1232 cp
->num_iscsi_tasks
;
1233 cp
->r2tq_size
= cp
->num_iscsi_tasks
* BNX2X_ISCSI_MAX_PENDING_R2TS
*
1234 BNX2X_ISCSI_R2TQE_SIZE
;
1235 cp
->hq_size
= cp
->num_ccells
* BNX2X_ISCSI_HQ_BD_SIZE
;
1236 pages
= PAGE_ALIGN(cp
->hq_size
) / PAGE_SIZE
;
1237 hq_bds
= pages
* (PAGE_SIZE
/ BNX2X_ISCSI_HQ_BD_SIZE
);
1238 cp
->num_cqs
= req1
->num_cqs
;
1240 if (!dev
->max_iscsi_conn
)
1243 /* init Tstorm RAM */
1244 CNIC_WR16(dev
, BAR_TSTRORM_INTMEM
+ TSTORM_ISCSI_RQ_SIZE_OFFSET(func
),
1246 CNIC_WR16(dev
, BAR_TSTRORM_INTMEM
+ TSTORM_ISCSI_PAGE_SIZE_OFFSET(func
),
1248 CNIC_WR8(dev
, BAR_TSTRORM_INTMEM
+
1249 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func
), PAGE_SHIFT
);
1250 CNIC_WR16(dev
, BAR_TSTRORM_INTMEM
+
1251 TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func
),
1252 req1
->num_tasks_per_conn
);
1254 /* init Ustorm RAM */
1255 CNIC_WR16(dev
, BAR_USTRORM_INTMEM
+
1256 USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(func
),
1257 req1
->rq_buffer_size
);
1258 CNIC_WR16(dev
, BAR_USTRORM_INTMEM
+ USTORM_ISCSI_PAGE_SIZE_OFFSET(func
),
1260 CNIC_WR8(dev
, BAR_USTRORM_INTMEM
+
1261 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func
), PAGE_SHIFT
);
1262 CNIC_WR16(dev
, BAR_USTRORM_INTMEM
+
1263 USTORM_ISCSI_NUM_OF_TASKS_OFFSET(func
),
1264 req1
->num_tasks_per_conn
);
1265 CNIC_WR16(dev
, BAR_USTRORM_INTMEM
+ USTORM_ISCSI_RQ_SIZE_OFFSET(func
),
1267 CNIC_WR16(dev
, BAR_USTRORM_INTMEM
+ USTORM_ISCSI_CQ_SIZE_OFFSET(func
),
1269 CNIC_WR16(dev
, BAR_USTRORM_INTMEM
+ USTORM_ISCSI_R2TQ_SIZE_OFFSET(func
),
1270 cp
->num_iscsi_tasks
* BNX2X_ISCSI_MAX_PENDING_R2TS
);
1272 /* init Xstorm RAM */
1273 CNIC_WR16(dev
, BAR_XSTRORM_INTMEM
+ XSTORM_ISCSI_PAGE_SIZE_OFFSET(func
),
1275 CNIC_WR8(dev
, BAR_XSTRORM_INTMEM
+
1276 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func
), PAGE_SHIFT
);
1277 CNIC_WR16(dev
, BAR_XSTRORM_INTMEM
+
1278 XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func
),
1279 req1
->num_tasks_per_conn
);
1280 CNIC_WR16(dev
, BAR_XSTRORM_INTMEM
+ XSTORM_ISCSI_HQ_SIZE_OFFSET(func
),
1282 CNIC_WR16(dev
, BAR_XSTRORM_INTMEM
+ XSTORM_ISCSI_SQ_SIZE_OFFSET(func
),
1283 req1
->num_tasks_per_conn
);
1284 CNIC_WR16(dev
, BAR_XSTRORM_INTMEM
+ XSTORM_ISCSI_R2TQ_SIZE_OFFSET(func
),
1285 cp
->num_iscsi_tasks
* BNX2X_ISCSI_MAX_PENDING_R2TS
);
1287 /* init Cstorm RAM */
1288 CNIC_WR16(dev
, BAR_CSTRORM_INTMEM
+ CSTORM_ISCSI_PAGE_SIZE_OFFSET(func
),
1290 CNIC_WR8(dev
, BAR_CSTRORM_INTMEM
+
1291 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func
), PAGE_SHIFT
);
1292 CNIC_WR16(dev
, BAR_CSTRORM_INTMEM
+
1293 CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func
),
1294 req1
->num_tasks_per_conn
);
1295 CNIC_WR16(dev
, BAR_CSTRORM_INTMEM
+ CSTORM_ISCSI_CQ_SIZE_OFFSET(func
),
1297 CNIC_WR16(dev
, BAR_CSTRORM_INTMEM
+ CSTORM_ISCSI_HQ_SIZE_OFFSET(func
),
1303 static int cnic_bnx2x_iscsi_init2(struct cnic_dev
*dev
, struct kwqe
*kwqe
)
1305 struct iscsi_kwqe_init2
*req2
= (struct iscsi_kwqe_init2
*) kwqe
;
1306 struct cnic_local
*cp
= dev
->cnic_priv
;
1307 int func
= cp
->func
;
1308 struct iscsi_kcqe kcqe
;
1309 struct kcqe
*cqes
[1];
1311 memset(&kcqe
, 0, sizeof(kcqe
));
1312 if (!dev
->max_iscsi_conn
) {
1313 kcqe
.completion_status
=
1314 ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED
;
1318 CNIC_WR(dev
, BAR_TSTRORM_INTMEM
+
1319 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func
), req2
->error_bit_map
[0]);
1320 CNIC_WR(dev
, BAR_TSTRORM_INTMEM
+
1321 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func
) + 4,
1322 req2
->error_bit_map
[1]);
1324 CNIC_WR16(dev
, BAR_USTRORM_INTMEM
+
1325 USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func
), req2
->max_cq_sqn
);
1326 CNIC_WR(dev
, BAR_USTRORM_INTMEM
+
1327 USTORM_ISCSI_ERROR_BITMAP_OFFSET(func
), req2
->error_bit_map
[0]);
1328 CNIC_WR(dev
, BAR_USTRORM_INTMEM
+
1329 USTORM_ISCSI_ERROR_BITMAP_OFFSET(func
) + 4,
1330 req2
->error_bit_map
[1]);
1332 CNIC_WR16(dev
, BAR_CSTRORM_INTMEM
+
1333 CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func
), req2
->max_cq_sqn
);
1335 kcqe
.completion_status
= ISCSI_KCQE_COMPLETION_STATUS_SUCCESS
;
1338 kcqe
.op_code
= ISCSI_KCQE_OPCODE_INIT
;
1339 cqes
[0] = (struct kcqe
*) &kcqe
;
1340 cnic_reply_bnx2x_kcqes(dev
, CNIC_ULP_ISCSI
, cqes
, 1);
1345 static void cnic_free_bnx2x_conn_resc(struct cnic_dev
*dev
, u32 l5_cid
)
1347 struct cnic_local
*cp
= dev
->cnic_priv
;
1348 struct cnic_context
*ctx
= &cp
->ctx_tbl
[l5_cid
];
1350 if (ctx
->ulp_proto_id
== CNIC_ULP_ISCSI
) {
1351 struct cnic_iscsi
*iscsi
= ctx
->proto
.iscsi
;
1353 cnic_free_dma(dev
, &iscsi
->hq_info
);
1354 cnic_free_dma(dev
, &iscsi
->r2tq_info
);
1355 cnic_free_dma(dev
, &iscsi
->task_array_info
);
1357 cnic_free_id(&cp
->cid_tbl
, ctx
->cid
);
1361 static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev
*dev
, u32 l5_cid
)
1365 struct cnic_local
*cp
= dev
->cnic_priv
;
1366 struct cnic_context
*ctx
= &cp
->ctx_tbl
[l5_cid
];
1367 struct cnic_iscsi
*iscsi
= ctx
->proto
.iscsi
;
1369 cid
= cnic_alloc_new_id(&cp
->cid_tbl
);
1376 pages
= PAGE_ALIGN(cp
->task_array_size
) / PAGE_SIZE
;
1378 ret
= cnic_alloc_dma(dev
, &iscsi
->task_array_info
, pages
, 1);
1382 pages
= PAGE_ALIGN(cp
->r2tq_size
) / PAGE_SIZE
;
1383 ret
= cnic_alloc_dma(dev
, &iscsi
->r2tq_info
, pages
, 1);
1387 pages
= PAGE_ALIGN(cp
->hq_size
) / PAGE_SIZE
;
1388 ret
= cnic_alloc_dma(dev
, &iscsi
->hq_info
, pages
, 1);
1395 cnic_free_bnx2x_conn_resc(dev
, l5_cid
);
1399 static void *cnic_get_bnx2x_ctx(struct cnic_dev
*dev
, u32 cid
, int init
,
1400 struct regpair
*ctx_addr
)
1402 struct cnic_local
*cp
= dev
->cnic_priv
;
1403 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
1404 int blk
= (cid
- ethdev
->starting_cid
) / cp
->cids_per_blk
;
1405 int off
= (cid
- ethdev
->starting_cid
) % cp
->cids_per_blk
;
1406 unsigned long align_off
= 0;
1410 if (cp
->ctx_align
) {
1411 unsigned long mask
= cp
->ctx_align
- 1;
1413 if (cp
->ctx_arr
[blk
].mapping
& mask
)
1414 align_off
= cp
->ctx_align
-
1415 (cp
->ctx_arr
[blk
].mapping
& mask
);
1417 ctx_map
= cp
->ctx_arr
[blk
].mapping
+ align_off
+
1418 (off
* BNX2X_CONTEXT_MEM_SIZE
);
1419 ctx
= cp
->ctx_arr
[blk
].ctx
+ align_off
+
1420 (off
* BNX2X_CONTEXT_MEM_SIZE
);
1422 memset(ctx
, 0, BNX2X_CONTEXT_MEM_SIZE
);
1424 ctx_addr
->lo
= ctx_map
& 0xffffffff;
1425 ctx_addr
->hi
= (u64
) ctx_map
>> 32;
1429 static int cnic_setup_bnx2x_ctx(struct cnic_dev
*dev
, struct kwqe
*wqes
[],
1432 struct cnic_local
*cp
= dev
->cnic_priv
;
1433 struct iscsi_kwqe_conn_offload1
*req1
=
1434 (struct iscsi_kwqe_conn_offload1
*) wqes
[0];
1435 struct iscsi_kwqe_conn_offload2
*req2
=
1436 (struct iscsi_kwqe_conn_offload2
*) wqes
[1];
1437 struct iscsi_kwqe_conn_offload3
*req3
;
1438 struct cnic_context
*ctx
= &cp
->ctx_tbl
[req1
->iscsi_conn_id
];
1439 struct cnic_iscsi
*iscsi
= ctx
->proto
.iscsi
;
1441 u32 hw_cid
= BNX2X_HW_CID(cid
, cp
->func
);
1442 struct iscsi_context
*ictx
;
1443 struct regpair context_addr
;
1444 int i
, j
, n
= 2, n_max
;
1447 if (!req2
->num_additional_wqes
)
1450 n_max
= req2
->num_additional_wqes
+ 2;
1452 ictx
= cnic_get_bnx2x_ctx(dev
, cid
, 1, &context_addr
);
1456 req3
= (struct iscsi_kwqe_conn_offload3
*) wqes
[n
++];
1458 ictx
->xstorm_ag_context
.hq_prod
= 1;
1460 ictx
->xstorm_st_context
.iscsi
.first_burst_length
=
1461 ISCSI_DEF_FIRST_BURST_LEN
;
1462 ictx
->xstorm_st_context
.iscsi
.max_send_pdu_length
=
1463 ISCSI_DEF_MAX_RECV_SEG_LEN
;
1464 ictx
->xstorm_st_context
.iscsi
.sq_pbl_base
.lo
=
1465 req1
->sq_page_table_addr_lo
;
1466 ictx
->xstorm_st_context
.iscsi
.sq_pbl_base
.hi
=
1467 req1
->sq_page_table_addr_hi
;
1468 ictx
->xstorm_st_context
.iscsi
.sq_curr_pbe
.lo
= req2
->sq_first_pte
.hi
;
1469 ictx
->xstorm_st_context
.iscsi
.sq_curr_pbe
.hi
= req2
->sq_first_pte
.lo
;
1470 ictx
->xstorm_st_context
.iscsi
.hq_pbl_base
.lo
=
1471 iscsi
->hq_info
.pgtbl_map
& 0xffffffff;
1472 ictx
->xstorm_st_context
.iscsi
.hq_pbl_base
.hi
=
1473 (u64
) iscsi
->hq_info
.pgtbl_map
>> 32;
1474 ictx
->xstorm_st_context
.iscsi
.hq_curr_pbe_base
.lo
=
1475 iscsi
->hq_info
.pgtbl
[0];
1476 ictx
->xstorm_st_context
.iscsi
.hq_curr_pbe_base
.hi
=
1477 iscsi
->hq_info
.pgtbl
[1];
1478 ictx
->xstorm_st_context
.iscsi
.r2tq_pbl_base
.lo
=
1479 iscsi
->r2tq_info
.pgtbl_map
& 0xffffffff;
1480 ictx
->xstorm_st_context
.iscsi
.r2tq_pbl_base
.hi
=
1481 (u64
) iscsi
->r2tq_info
.pgtbl_map
>> 32;
1482 ictx
->xstorm_st_context
.iscsi
.r2tq_curr_pbe_base
.lo
=
1483 iscsi
->r2tq_info
.pgtbl
[0];
1484 ictx
->xstorm_st_context
.iscsi
.r2tq_curr_pbe_base
.hi
=
1485 iscsi
->r2tq_info
.pgtbl
[1];
1486 ictx
->xstorm_st_context
.iscsi
.task_pbl_base
.lo
=
1487 iscsi
->task_array_info
.pgtbl_map
& 0xffffffff;
1488 ictx
->xstorm_st_context
.iscsi
.task_pbl_base
.hi
=
1489 (u64
) iscsi
->task_array_info
.pgtbl_map
>> 32;
1490 ictx
->xstorm_st_context
.iscsi
.task_pbl_cache_idx
=
1491 BNX2X_ISCSI_PBL_NOT_CACHED
;
1492 ictx
->xstorm_st_context
.iscsi
.flags
.flags
|=
1493 XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA
;
1494 ictx
->xstorm_st_context
.iscsi
.flags
.flags
|=
1495 XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T
;
1497 ictx
->tstorm_st_context
.iscsi
.hdr_bytes_2_fetch
= ISCSI_HEADER_SIZE
;
1498 /* TSTORM requires the base address of RQ DB & not PTE */
1499 ictx
->tstorm_st_context
.iscsi
.rq_db_phy_addr
.lo
=
1500 req2
->rq_page_table_addr_lo
& PAGE_MASK
;
1501 ictx
->tstorm_st_context
.iscsi
.rq_db_phy_addr
.hi
=
1502 req2
->rq_page_table_addr_hi
;
1503 ictx
->tstorm_st_context
.iscsi
.iscsi_conn_id
= req1
->iscsi_conn_id
;
1504 ictx
->tstorm_st_context
.tcp
.cwnd
= 0x5A8;
1505 ictx
->tstorm_st_context
.tcp
.flags2
|=
1506 TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN
;
1508 ictx
->timers_context
.flags
|= ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG
;
1510 ictx
->ustorm_st_context
.ring
.rq
.pbl_base
.lo
=
1511 req2
->rq_page_table_addr_lo
;
1512 ictx
->ustorm_st_context
.ring
.rq
.pbl_base
.hi
=
1513 req2
->rq_page_table_addr_hi
;
1514 ictx
->ustorm_st_context
.ring
.rq
.curr_pbe
.lo
= req3
->qp_first_pte
[0].hi
;
1515 ictx
->ustorm_st_context
.ring
.rq
.curr_pbe
.hi
= req3
->qp_first_pte
[0].lo
;
1516 ictx
->ustorm_st_context
.ring
.r2tq
.pbl_base
.lo
=
1517 iscsi
->r2tq_info
.pgtbl_map
& 0xffffffff;
1518 ictx
->ustorm_st_context
.ring
.r2tq
.pbl_base
.hi
=
1519 (u64
) iscsi
->r2tq_info
.pgtbl_map
>> 32;
1520 ictx
->ustorm_st_context
.ring
.r2tq
.curr_pbe
.lo
=
1521 iscsi
->r2tq_info
.pgtbl
[0];
1522 ictx
->ustorm_st_context
.ring
.r2tq
.curr_pbe
.hi
=
1523 iscsi
->r2tq_info
.pgtbl
[1];
1524 ictx
->ustorm_st_context
.ring
.cq_pbl_base
.lo
=
1525 req1
->cq_page_table_addr_lo
;
1526 ictx
->ustorm_st_context
.ring
.cq_pbl_base
.hi
=
1527 req1
->cq_page_table_addr_hi
;
1528 ictx
->ustorm_st_context
.ring
.cq
[0].cq_sn
= ISCSI_INITIAL_SN
;
1529 ictx
->ustorm_st_context
.ring
.cq
[0].curr_pbe
.lo
= req2
->cq_first_pte
.hi
;
1530 ictx
->ustorm_st_context
.ring
.cq
[0].curr_pbe
.hi
= req2
->cq_first_pte
.lo
;
1531 ictx
->ustorm_st_context
.task_pbe_cache_index
=
1532 BNX2X_ISCSI_PBL_NOT_CACHED
;
1533 ictx
->ustorm_st_context
.task_pdu_cache_index
=
1534 BNX2X_ISCSI_PDU_HEADER_NOT_CACHED
;
1536 for (i
= 1, j
= 1; i
< cp
->num_cqs
; i
++, j
++) {
1540 req3
= (struct iscsi_kwqe_conn_offload3
*) wqes
[n
++];
1543 ictx
->ustorm_st_context
.ring
.cq
[i
].cq_sn
= ISCSI_INITIAL_SN
;
1544 ictx
->ustorm_st_context
.ring
.cq
[i
].curr_pbe
.lo
=
1545 req3
->qp_first_pte
[j
].hi
;
1546 ictx
->ustorm_st_context
.ring
.cq
[i
].curr_pbe
.hi
=
1547 req3
->qp_first_pte
[j
].lo
;
1550 ictx
->ustorm_st_context
.task_pbl_base
.lo
=
1551 iscsi
->task_array_info
.pgtbl_map
& 0xffffffff;
1552 ictx
->ustorm_st_context
.task_pbl_base
.hi
=
1553 (u64
) iscsi
->task_array_info
.pgtbl_map
>> 32;
1554 ictx
->ustorm_st_context
.tce_phy_addr
.lo
=
1555 iscsi
->task_array_info
.pgtbl
[0];
1556 ictx
->ustorm_st_context
.tce_phy_addr
.hi
=
1557 iscsi
->task_array_info
.pgtbl
[1];
1558 ictx
->ustorm_st_context
.iscsi_conn_id
= req1
->iscsi_conn_id
;
1559 ictx
->ustorm_st_context
.num_cqs
= cp
->num_cqs
;
1560 ictx
->ustorm_st_context
.negotiated_rx
|= ISCSI_DEF_MAX_RECV_SEG_LEN
;
1561 ictx
->ustorm_st_context
.negotiated_rx_and_flags
|=
1562 ISCSI_DEF_MAX_BURST_LEN
;
1563 ictx
->ustorm_st_context
.negotiated_rx
|=
1564 ISCSI_DEFAULT_MAX_OUTSTANDING_R2T
<<
1565 USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT
;
1567 ictx
->cstorm_st_context
.hq_pbl_base
.lo
=
1568 iscsi
->hq_info
.pgtbl_map
& 0xffffffff;
1569 ictx
->cstorm_st_context
.hq_pbl_base
.hi
=
1570 (u64
) iscsi
->hq_info
.pgtbl_map
>> 32;
1571 ictx
->cstorm_st_context
.hq_curr_pbe
.lo
= iscsi
->hq_info
.pgtbl
[0];
1572 ictx
->cstorm_st_context
.hq_curr_pbe
.hi
= iscsi
->hq_info
.pgtbl
[1];
1573 ictx
->cstorm_st_context
.task_pbl_base
.lo
=
1574 iscsi
->task_array_info
.pgtbl_map
& 0xffffffff;
1575 ictx
->cstorm_st_context
.task_pbl_base
.hi
=
1576 (u64
) iscsi
->task_array_info
.pgtbl_map
>> 32;
1577 /* CSTORM and USTORM initialization is different, CSTORM requires
1578 * CQ DB base & not PTE addr */
1579 ictx
->cstorm_st_context
.cq_db_base
.lo
=
1580 req1
->cq_page_table_addr_lo
& PAGE_MASK
;
1581 ictx
->cstorm_st_context
.cq_db_base
.hi
= req1
->cq_page_table_addr_hi
;
1582 ictx
->cstorm_st_context
.iscsi_conn_id
= req1
->iscsi_conn_id
;
1583 ictx
->cstorm_st_context
.cq_proc_en_bit_map
= (1 << cp
->num_cqs
) - 1;
1584 for (i
= 0; i
< cp
->num_cqs
; i
++) {
1585 ictx
->cstorm_st_context
.cq_c_prod_sqn_arr
.sqn
[i
] =
1587 ictx
->cstorm_st_context
.cq_c_sqn_2_notify_arr
.sqn
[i
] =
1591 ictx
->xstorm_ag_context
.cdu_reserved
=
1592 CDU_RSRVD_VALUE_TYPE_A(hw_cid
, CDU_REGION_NUMBER_XCM_AG
,
1593 ISCSI_CONNECTION_TYPE
);
1594 ictx
->ustorm_ag_context
.cdu_usage
=
1595 CDU_RSRVD_VALUE_TYPE_A(hw_cid
, CDU_REGION_NUMBER_UCM_AG
,
1596 ISCSI_CONNECTION_TYPE
);
1601 static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev
*dev
, struct kwqe
*wqes
[],
1604 struct iscsi_kwqe_conn_offload1
*req1
;
1605 struct iscsi_kwqe_conn_offload2
*req2
;
1606 struct cnic_local
*cp
= dev
->cnic_priv
;
1607 struct iscsi_kcqe kcqe
;
1608 struct kcqe
*cqes
[1];
1617 req1
= (struct iscsi_kwqe_conn_offload1
*) wqes
[0];
1618 req2
= (struct iscsi_kwqe_conn_offload2
*) wqes
[1];
1619 if ((num
- 2) < req2
->num_additional_wqes
) {
	*work = 2 + req2->num_additional_wqes;
1625 l5_cid
= req1
->iscsi_conn_id
;
1626 if (l5_cid
>= MAX_ISCSI_TBL_SZ
)
1629 memset(&kcqe
, 0, sizeof(kcqe
));
1630 kcqe
.op_code
= ISCSI_KCQE_OPCODE_OFFLOAD_CONN
;
1631 kcqe
.iscsi_conn_id
= l5_cid
;
1632 kcqe
.completion_status
= ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE
;
1634 if (atomic_inc_return(&cp
->iscsi_conn
) > dev
->max_iscsi_conn
) {
1635 atomic_dec(&cp
->iscsi_conn
);
1639 ret
= cnic_alloc_bnx2x_conn_resc(dev
, l5_cid
);
1641 atomic_dec(&cp
->iscsi_conn
);
1645 ret
= cnic_setup_bnx2x_ctx(dev
, wqes
, num
);
1647 cnic_free_bnx2x_conn_resc(dev
, l5_cid
);
1648 atomic_dec(&cp
->iscsi_conn
);
1652 kcqe
.completion_status
= ISCSI_KCQE_COMPLETION_STATUS_SUCCESS
;
1653 kcqe
.iscsi_conn_context_id
= BNX2X_HW_CID(cp
->ctx_tbl
[l5_cid
].cid
,
1657 cqes
[0] = (struct kcqe
*) &kcqe
;
1658 cnic_reply_bnx2x_kcqes(dev
, CNIC_ULP_ISCSI
, cqes
, 1);
1663 static int cnic_bnx2x_iscsi_update(struct cnic_dev
*dev
, struct kwqe
*kwqe
)
1665 struct cnic_local
*cp
= dev
->cnic_priv
;
1666 struct iscsi_kwqe_conn_update
*req
=
1667 (struct iscsi_kwqe_conn_update
*) kwqe
;
1669 union l5cm_specific_data l5_data
;
1670 u32 l5_cid
, cid
= BNX2X_SW_CID(req
->context_id
);
1673 if (cnic_get_l5_cid(cp
, cid
, &l5_cid
) != 0)
1676 data
= cnic_get_kwqe_16_data(cp
, l5_cid
, &l5_data
);
1680 memcpy(data
, kwqe
, sizeof(struct kwqe
));
1682 ret
= cnic_submit_kwqe_16(dev
, ISCSI_RAMROD_CMD_ID_UPDATE_CONN
,
1683 req
->context_id
, ISCSI_CONNECTION_TYPE
, &l5_data
);
1687 static int cnic_bnx2x_iscsi_destroy(struct cnic_dev
*dev
, struct kwqe
*kwqe
)
1689 struct cnic_local
*cp
= dev
->cnic_priv
;
1690 struct iscsi_kwqe_conn_destroy
*req
=
1691 (struct iscsi_kwqe_conn_destroy
*) kwqe
;
1692 union l5cm_specific_data l5_data
;
1693 u32 l5_cid
= req
->reserved0
;
1694 struct cnic_context
*ctx
= &cp
->ctx_tbl
[l5_cid
];
1696 struct iscsi_kcqe kcqe
;
1697 struct kcqe
*cqes
[1];
1699 if (!(ctx
->ctx_flags
& CTX_FL_OFFLD_START
))
1700 goto skip_cfc_delete
;
1702 while (!time_after(jiffies
, ctx
->timestamp
+ (2 * HZ
)))
1705 init_waitqueue_head(&ctx
->waitq
);
1707 memset(&l5_data
, 0, sizeof(l5_data
));
1708 ret
= cnic_submit_kwqe_16(dev
, RAMROD_CMD_ID_ETH_CFC_DEL
,
1710 ETH_CONNECTION_TYPE
|
1711 (1 << SPE_HDR_COMMON_RAMROD_SHIFT
),
1714 wait_event(ctx
->waitq
, ctx
->wait_cond
);
1717 cnic_free_bnx2x_conn_resc(dev
, l5_cid
);
1719 atomic_dec(&cp
->iscsi_conn
);
1721 memset(&kcqe
, 0, sizeof(kcqe
));
1722 kcqe
.op_code
= ISCSI_KCQE_OPCODE_DESTROY_CONN
;
1723 kcqe
.iscsi_conn_id
= l5_cid
;
1724 kcqe
.completion_status
= ISCSI_KCQE_COMPLETION_STATUS_SUCCESS
;
1725 kcqe
.iscsi_conn_context_id
= req
->context_id
;
1727 cqes
[0] = (struct kcqe
*) &kcqe
;
1728 cnic_reply_bnx2x_kcqes(dev
, CNIC_ULP_ISCSI
, cqes
, 1);
1733 static void cnic_init_storm_conn_bufs(struct cnic_dev
*dev
,
1734 struct l4_kwq_connect_req1
*kwqe1
,
1735 struct l4_kwq_connect_req3
*kwqe3
,
1736 struct l5cm_active_conn_buffer
*conn_buf
)
1738 struct l5cm_conn_addr_params
*conn_addr
= &conn_buf
->conn_addr_buf
;
1739 struct l5cm_xstorm_conn_buffer
*xstorm_buf
=
1740 &conn_buf
->xstorm_conn_buffer
;
1741 struct l5cm_tstorm_conn_buffer
*tstorm_buf
=
1742 &conn_buf
->tstorm_conn_buffer
;
1743 struct regpair context_addr
;
1744 u32 cid
= BNX2X_SW_CID(kwqe1
->cid
);
1745 struct in6_addr src_ip
, dst_ip
;
1749 addrp
= (u32
*) &conn_addr
->local_ip_addr
;
1750 for (i
= 0; i
< 4; i
++, addrp
++)
1751 src_ip
.in6_u
.u6_addr32
[i
] = cpu_to_be32(*addrp
);
1753 addrp
= (u32
*) &conn_addr
->remote_ip_addr
;
1754 for (i
= 0; i
< 4; i
++, addrp
++)
1755 dst_ip
.in6_u
.u6_addr32
[i
] = cpu_to_be32(*addrp
);
1757 cnic_get_bnx2x_ctx(dev
, cid
, 0, &context_addr
);
1759 xstorm_buf
->context_addr
.hi
= context_addr
.hi
;
1760 xstorm_buf
->context_addr
.lo
= context_addr
.lo
;
1761 xstorm_buf
->mss
= 0xffff;
1762 xstorm_buf
->rcv_buf
= kwqe3
->rcv_buf
;
1763 if (kwqe1
->tcp_flags
& L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE
)
1764 xstorm_buf
->params
|= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE
;
1765 xstorm_buf
->pseudo_header_checksum
=
1766 swab16(~csum_ipv6_magic(&src_ip
, &dst_ip
, 0, IPPROTO_TCP
, 0));
1768 if (!(kwqe1
->tcp_flags
& L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK
))
1769 tstorm_buf
->params
|=
1770 L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE
;
1771 if (kwqe3
->ka_timeout
) {
1772 tstorm_buf
->ka_enable
= 1;
1773 tstorm_buf
->ka_timeout
= kwqe3
->ka_timeout
;
1774 tstorm_buf
->ka_interval
= kwqe3
->ka_interval
;
1775 tstorm_buf
->ka_max_probe_count
= kwqe3
->ka_max_probe_count
;
1777 tstorm_buf
->rcv_buf
= kwqe3
->rcv_buf
;
1778 tstorm_buf
->snd_buf
= kwqe3
->snd_buf
;
1779 tstorm_buf
->max_rt_time
= 0xffffffff;
1782 static void cnic_init_bnx2x_mac(struct cnic_dev
*dev
)
1784 struct cnic_local
*cp
= dev
->cnic_priv
;
1785 int func
= CNIC_FUNC(cp
);
1786 u8
*mac
= dev
->mac_addr
;
1788 CNIC_WR8(dev
, BAR_XSTRORM_INTMEM
+
1789 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(func
), mac
[0]);
1790 CNIC_WR8(dev
, BAR_XSTRORM_INTMEM
+
1791 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(func
), mac
[1]);
1792 CNIC_WR8(dev
, BAR_XSTRORM_INTMEM
+
1793 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(func
), mac
[2]);
1794 CNIC_WR8(dev
, BAR_XSTRORM_INTMEM
+
1795 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(func
), mac
[3]);
1796 CNIC_WR8(dev
, BAR_XSTRORM_INTMEM
+
1797 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(func
), mac
[4]);
1798 CNIC_WR8(dev
, BAR_XSTRORM_INTMEM
+
1799 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(func
), mac
[5]);
1801 CNIC_WR8(dev
, BAR_TSTRORM_INTMEM
+
1802 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func
), mac
[5]);
1803 CNIC_WR8(dev
, BAR_TSTRORM_INTMEM
+
1804 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func
) + 1,
1806 CNIC_WR8(dev
, BAR_TSTRORM_INTMEM
+
1807 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func
), mac
[3]);
1808 CNIC_WR8(dev
, BAR_TSTRORM_INTMEM
+
1809 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func
) + 1,
1811 CNIC_WR8(dev
, BAR_TSTRORM_INTMEM
+
1812 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func
) + 2,
1814 CNIC_WR8(dev
, BAR_TSTRORM_INTMEM
+
1815 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func
) + 3,
1819 static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev
*dev
, int tcp_ts
)
1821 struct cnic_local
*cp
= dev
->cnic_priv
;
1822 u8 xstorm_flags
= XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN
;
1823 u16 tstorm_flags
= 0;
1826 xstorm_flags
|= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED
;
1827 tstorm_flags
|= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED
;
1830 CNIC_WR8(dev
, BAR_XSTRORM_INTMEM
+
1831 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp
->func
), xstorm_flags
);
1833 CNIC_WR16(dev
, BAR_TSTRORM_INTMEM
+
1834 TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp
->func
), tstorm_flags
);
1837 static int cnic_bnx2x_connect(struct cnic_dev
*dev
, struct kwqe
*wqes
[],
1840 struct cnic_local
*cp
= dev
->cnic_priv
;
1841 struct l4_kwq_connect_req1
*kwqe1
=
1842 (struct l4_kwq_connect_req1
*) wqes
[0];
1843 struct l4_kwq_connect_req3
*kwqe3
;
1844 struct l5cm_active_conn_buffer
*conn_buf
;
1845 struct l5cm_conn_addr_params
*conn_addr
;
1846 union l5cm_specific_data l5_data
;
1847 u32 l5_cid
= kwqe1
->pg_cid
;
1848 struct cnic_sock
*csk
= &cp
->csk_tbl
[l5_cid
];
1849 struct cnic_context
*ctx
= &cp
->ctx_tbl
[l5_cid
];
1857 if (kwqe1
->conn_flags
& L4_KWQ_CONNECT_REQ1_IP_V6
)
1867 if (sizeof(*conn_buf
) > CNIC_KWQ16_DATA_SIZE
) {
1868 printk(KERN_ERR PFX
"%s: conn_buf size too big\n",
1872 conn_buf
= cnic_get_kwqe_16_data(cp
, l5_cid
, &l5_data
);
1876 memset(conn_buf
, 0, sizeof(*conn_buf
));
1878 conn_addr
= &conn_buf
->conn_addr_buf
;
1879 conn_addr
->remote_addr_0
= csk
->ha
[0];
1880 conn_addr
->remote_addr_1
= csk
->ha
[1];
1881 conn_addr
->remote_addr_2
= csk
->ha
[2];
1882 conn_addr
->remote_addr_3
= csk
->ha
[3];
1883 conn_addr
->remote_addr_4
= csk
->ha
[4];
1884 conn_addr
->remote_addr_5
= csk
->ha
[5];
1886 if (kwqe1
->conn_flags
& L4_KWQ_CONNECT_REQ1_IP_V6
) {
1887 struct l4_kwq_connect_req2
*kwqe2
=
1888 (struct l4_kwq_connect_req2
*) wqes
[1];
1890 conn_addr
->local_ip_addr
.ip_addr_hi_hi
= kwqe2
->src_ip_v6_4
;
1891 conn_addr
->local_ip_addr
.ip_addr_hi_lo
= kwqe2
->src_ip_v6_3
;
1892 conn_addr
->local_ip_addr
.ip_addr_lo_hi
= kwqe2
->src_ip_v6_2
;
1894 conn_addr
->remote_ip_addr
.ip_addr_hi_hi
= kwqe2
->dst_ip_v6_4
;
1895 conn_addr
->remote_ip_addr
.ip_addr_hi_lo
= kwqe2
->dst_ip_v6_3
;
1896 conn_addr
->remote_ip_addr
.ip_addr_lo_hi
= kwqe2
->dst_ip_v6_2
;
1897 conn_addr
->params
|= L5CM_CONN_ADDR_PARAMS_IP_VERSION
;
1899 kwqe3
= (struct l4_kwq_connect_req3
*) wqes
[*work
- 1];
1901 conn_addr
->local_ip_addr
.ip_addr_lo_lo
= kwqe1
->src_ip
;
1902 conn_addr
->remote_ip_addr
.ip_addr_lo_lo
= kwqe1
->dst_ip
;
1903 conn_addr
->local_tcp_port
= kwqe1
->src_port
;
1904 conn_addr
->remote_tcp_port
= kwqe1
->dst_port
;
1906 conn_addr
->pmtu
= kwqe3
->pmtu
;
1907 cnic_init_storm_conn_bufs(dev
, kwqe1
, kwqe3
, conn_buf
);
1909 CNIC_WR16(dev
, BAR_XSTRORM_INTMEM
+
1910 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp
->func
), csk
->vlan_id
);
1912 cnic_bnx2x_set_tcp_timestamp(dev
,
1913 kwqe1
->tcp_flags
& L4_KWQ_CONNECT_REQ1_TIME_STAMP
);
1915 ret
= cnic_submit_kwqe_16(dev
, L5CM_RAMROD_CMD_ID_TCP_CONNECT
,
1916 kwqe1
->cid
, ISCSI_CONNECTION_TYPE
, &l5_data
);
1918 ctx
->ctx_flags
|= CTX_FL_OFFLD_START
;
1923 static int cnic_bnx2x_close(struct cnic_dev
*dev
, struct kwqe
*kwqe
)
1925 struct l4_kwq_close_req
*req
= (struct l4_kwq_close_req
*) kwqe
;
1926 union l5cm_specific_data l5_data
;
1929 memset(&l5_data
, 0, sizeof(l5_data
));
1930 ret
= cnic_submit_kwqe_16(dev
, L5CM_RAMROD_CMD_ID_CLOSE
,
1931 req
->cid
, ISCSI_CONNECTION_TYPE
, &l5_data
);
1935 static int cnic_bnx2x_reset(struct cnic_dev
*dev
, struct kwqe
*kwqe
)
1937 struct l4_kwq_reset_req
*req
= (struct l4_kwq_reset_req
*) kwqe
;
1938 union l5cm_specific_data l5_data
;
1941 memset(&l5_data
, 0, sizeof(l5_data
));
1942 ret
= cnic_submit_kwqe_16(dev
, L5CM_RAMROD_CMD_ID_ABORT
,
1943 req
->cid
, ISCSI_CONNECTION_TYPE
, &l5_data
);
1946 static int cnic_bnx2x_offload_pg(struct cnic_dev
*dev
, struct kwqe
*kwqe
)
1948 struct l4_kwq_offload_pg
*req
= (struct l4_kwq_offload_pg
*) kwqe
;
1950 struct kcqe
*cqes
[1];
1952 memset(&kcqe
, 0, sizeof(kcqe
));
1953 kcqe
.pg_host_opaque
= req
->host_opaque
;
1954 kcqe
.pg_cid
= req
->host_opaque
;
1955 kcqe
.op_code
= L4_KCQE_OPCODE_VALUE_OFFLOAD_PG
;
1956 cqes
[0] = (struct kcqe
*) &kcqe
;
1957 cnic_reply_bnx2x_kcqes(dev
, CNIC_ULP_L4
, cqes
, 1);
1961 static int cnic_bnx2x_update_pg(struct cnic_dev
*dev
, struct kwqe
*kwqe
)
1963 struct l4_kwq_update_pg
*req
= (struct l4_kwq_update_pg
*) kwqe
;
1965 struct kcqe
*cqes
[1];
1967 memset(&kcqe
, 0, sizeof(kcqe
));
1968 kcqe
.pg_host_opaque
= req
->pg_host_opaque
;
1969 kcqe
.pg_cid
= req
->pg_cid
;
1970 kcqe
.op_code
= L4_KCQE_OPCODE_VALUE_UPDATE_PG
;
1971 cqes
[0] = (struct kcqe
*) &kcqe
;
1972 cnic_reply_bnx2x_kcqes(dev
, CNIC_ULP_L4
, cqes
, 1);
1976 static int cnic_submit_bnx2x_kwqes(struct cnic_dev
*dev
, struct kwqe
*wqes
[],
	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */
1986 for (i
= 0; i
< num_wqes
; ) {
1988 opcode
= KWQE_OPCODE(kwqe
->kwqe_op_flag
);
1992 case ISCSI_KWQE_OPCODE_INIT1
:
1993 ret
= cnic_bnx2x_iscsi_init1(dev
, kwqe
);
1995 case ISCSI_KWQE_OPCODE_INIT2
:
1996 ret
= cnic_bnx2x_iscsi_init2(dev
, kwqe
);
1998 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1
:
1999 ret
= cnic_bnx2x_iscsi_ofld1(dev
, &wqes
[i
],
2000 num_wqes
- i
, &work
);
2002 case ISCSI_KWQE_OPCODE_UPDATE_CONN
:
2003 ret
= cnic_bnx2x_iscsi_update(dev
, kwqe
);
2005 case ISCSI_KWQE_OPCODE_DESTROY_CONN
:
2006 ret
= cnic_bnx2x_iscsi_destroy(dev
, kwqe
);
2008 case L4_KWQE_OPCODE_VALUE_CONNECT1
:
2009 ret
= cnic_bnx2x_connect(dev
, &wqes
[i
], num_wqes
- i
,
2012 case L4_KWQE_OPCODE_VALUE_CLOSE
:
2013 ret
= cnic_bnx2x_close(dev
, kwqe
);
2015 case L4_KWQE_OPCODE_VALUE_RESET
:
2016 ret
= cnic_bnx2x_reset(dev
, kwqe
);
2018 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG
:
2019 ret
= cnic_bnx2x_offload_pg(dev
, kwqe
);
2021 case L4_KWQE_OPCODE_VALUE_UPDATE_PG
:
2022 ret
= cnic_bnx2x_update_pg(dev
, kwqe
);
2024 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG
:
2029 printk(KERN_ERR PFX
"%s: Unknown type of KWQE(0x%x)\n",
2030 dev
->netdev
->name
, opcode
);
2034 printk(KERN_ERR PFX
"%s: KWQE(0x%x) failed\n",
2035 dev
->netdev
->name
, opcode
);
2041 static void service_kcqes(struct cnic_dev
*dev
, int num_cqes
)
2043 struct cnic_local
*cp
= dev
->cnic_priv
;
2049 struct cnic_ulp_ops
*ulp_ops
;
2051 u32 kcqe_op_flag
= cp
->completed_kcq
[i
]->kcqe_op_flag
;
2052 u32 kcqe_layer
= kcqe_op_flag
& KCQE_FLAGS_LAYER_MASK
;
2054 if (unlikely(kcqe_op_flag
& KCQE_RAMROD_COMPLETION
))
2055 cnic_kwq_completion(dev
, 1);
2057 while (j
< num_cqes
) {
2058 u32 next_op
= cp
->completed_kcq
[i
+ j
]->kcqe_op_flag
;
2060 if ((next_op
& KCQE_FLAGS_LAYER_MASK
) != kcqe_layer
)
2063 if (unlikely(next_op
& KCQE_RAMROD_COMPLETION
))
2064 cnic_kwq_completion(dev
, 1);
2068 if (kcqe_layer
== KCQE_FLAGS_LAYER_MASK_L5_RDMA
)
2069 ulp_type
= CNIC_ULP_RDMA
;
2070 else if (kcqe_layer
== KCQE_FLAGS_LAYER_MASK_L5_ISCSI
)
2071 ulp_type
= CNIC_ULP_ISCSI
;
2072 else if (kcqe_layer
== KCQE_FLAGS_LAYER_MASK_L4
)
2073 ulp_type
= CNIC_ULP_L4
;
2074 else if (kcqe_layer
== KCQE_FLAGS_LAYER_MASK_L2
)
2077 printk(KERN_ERR PFX
"%s: Unknown type of KCQE(0x%x)\n",
2078 dev
->netdev
->name
, kcqe_op_flag
);
2083 ulp_ops
= rcu_dereference(cp
->ulp_ops
[ulp_type
]);
2084 if (likely(ulp_ops
)) {
2085 ulp_ops
->indicate_kcqes(cp
->ulp_handle
[ulp_type
],
2086 cp
->completed_kcq
+ i
, j
);
static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}
2123 static int cnic_get_kcqes(struct cnic_dev
*dev
, u16 hw_prod
, u16
*sw_prod
)
2125 struct cnic_local
*cp
= dev
->cnic_priv
;
2128 int kcqe_cnt
= 0, last_cnt
= 0;
2130 i
= ri
= last
= *sw_prod
;
2133 while ((i
!= hw_prod
) && (kcqe_cnt
< MAX_COMPLETED_KCQE
)) {
2134 kcqe
= &cp
->kcq
[KCQ_PG(ri
)][KCQ_IDX(ri
)];
2135 cp
->completed_kcq
[kcqe_cnt
++] = kcqe
;
2136 i
= cp
->next_idx(i
);
2137 ri
= i
& MAX_KCQ_IDX
;
2138 if (likely(!(kcqe
->kcqe_op_flag
& KCQE_FLAGS_NEXT
))) {
2139 last_cnt
= kcqe_cnt
;
2148 static void cnic_chk_pkt_rings(struct cnic_local
*cp
)
2150 u16 rx_cons
= *cp
->rx_cons_ptr
;
2151 u16 tx_cons
= *cp
->tx_cons_ptr
;
2153 if (cp
->tx_cons
!= tx_cons
|| cp
->rx_cons
!= rx_cons
) {
2154 cp
->tx_cons
= tx_cons
;
2155 cp
->rx_cons
= rx_cons
;
2157 uio_event_notify(cp
->cnic_uinfo
);
2161 static int cnic_service_bnx2(void *data
, void *status_blk
)
2163 struct cnic_dev
*dev
= data
;
2164 struct status_block
*sblk
= status_blk
;
2165 struct cnic_local
*cp
= dev
->cnic_priv
;
2166 u32 status_idx
= sblk
->status_idx
;
2167 u16 hw_prod
, sw_prod
;
2170 if (unlikely(!test_bit(CNIC_F_CNIC_UP
, &dev
->flags
)))
2173 cp
->kwq_con_idx
= *cp
->kwq_con_idx_ptr
;
2175 hw_prod
= sblk
->status_completion_producer_index
;
2176 sw_prod
= cp
->kcq_prod_idx
;
2177 while (sw_prod
!= hw_prod
) {
2178 kcqe_cnt
= cnic_get_kcqes(dev
, hw_prod
, &sw_prod
);
2182 service_kcqes(dev
, kcqe_cnt
);
2184 /* Tell compiler that status_blk fields can change. */
2186 if (status_idx
!= sblk
->status_idx
) {
2187 status_idx
= sblk
->status_idx
;
2188 cp
->kwq_con_idx
= *cp
->kwq_con_idx_ptr
;
2189 hw_prod
= sblk
->status_completion_producer_index
;
2195 CNIC_WR16(dev
, cp
->kcq_io_addr
, sw_prod
);
2197 cp
->kcq_prod_idx
= sw_prod
;
2199 cnic_chk_pkt_rings(cp
);
static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	struct status_block_msix *status_blk = cp->bnx2_status_blk;
	u32 status_idx = status_blk->status_idx;
	u16 hw_prod, sw_prod;
	int kcqe_cnt;

	cp->kwq_con_idx = status_blk->status_cmd_consumer_index;

	hw_prod = status_blk->status_completion_producer_index;
	sw_prod = cp->kcq_prod_idx;
	while (sw_prod != hw_prod) {
		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
		if (kcqe_cnt == 0)
			goto done;

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != status_blk->status_idx) {
			status_idx = status_blk->status_idx;
			cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
			hw_prod = status_blk->status_completion_producer_index;
		} else
			break;
	}

done:
	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
	cp->kcq_prod_idx = sw_prod;

	cnic_chk_pkt_rings(cp);

	cp->last_status_idx = status_idx;
	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;
	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;

	if (cp->ack_int)
		cp->ack_int(dev);

	prefetch(cp->status_blk);
	prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		tasklet_schedule(&cp->cnic_irq_task);

	return IRQ_HANDLED;
}
static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
		((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
		 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
		 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
		 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}

static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 0,
			   IGU_INT_DISABLE, 0);
}
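
/* Tasklet (bottom half) that services the bnx2x iSCSI event queue and
 * then re-enables the IGU interrupt with the latest status block index.
 */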
static void cnic_service_bnx2x_bh(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	u16 hw_prod, sw_prod;
	struct cstorm_status_block_c *sblk =
		&cp->bnx2x_status_blk->c_status_block;
	u32 status_idx = sblk->status_block_index;
	int kcqe_cnt;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return;

	hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
	hw_prod = cp->hw_idx(hw_prod);
	sw_prod = cp->kcq_prod_idx;
	while (sw_prod != hw_prod) {
		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
		if (kcqe_cnt == 0)
			goto done;

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that sblk fields can change. */
		barrier();
		if (status_idx == sblk->status_block_index)
			break;

		status_idx = sblk->status_block_index;
		hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
		hw_prod = cp->hw_idx(hw_prod);
	}

done:
	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod + MAX_KCQ_IDX);
	cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID,
			   status_idx, IGU_INT_ENABLE, 1);

	cp->kcq_prod_idx = sw_prod;
}
static int cnic_service_bnx2x(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct cnic_local *cp = dev->cnic_priv;
	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;

	prefetch(cp->status_blk);
	prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		tasklet_schedule(&cp->cnic_irq_task);

	cnic_chk_pkt_rings(cp);

	return 0;
}
static void cnic_ulp_stop(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	if (cp->cnic_uinfo)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cp->ulp_ops[if_type];
		if (!ulp_ops) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

static void cnic_ulp_start(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cp->ulp_ops[if_type];
		if (!ulp_ops || !ulp_ops->cnic_start) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_start(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}
static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
	struct cnic_dev *dev = data;

	switch (info->cmd) {
	case CNIC_CTL_STOP_CMD:
		cnic_hold(dev);

		cnic_ulp_stop(dev);
		cnic_stop_hw(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_START_CMD:
		cnic_hold(dev);

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_COMPLETION_CMD: {
		u32 cid = BNX2X_SW_CID(info->data.comp.cid);
		u32 l5_cid;
		struct cnic_local *cp = dev->cnic_priv;

		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

			ctx->wait_cond = 1;
			wake_up(&ctx->waitq);
		}
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
static void cnic_ulp_init(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl[i];
		if (!ulp_ops || !ulp_ops->cnic_init) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_init(dev);

		ulp_put(ulp_ops);
	}
}

static void cnic_ulp_exit(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl[i];
		if (!ulp_ops || !ulp_ops->cnic_exit) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_exit(dev);

		ulp_put(ulp_ops);
	}
}
static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_count = DEF_IPID_COUNT;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_upload *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->pg_cid;

	return dev->submit_kwqes(dev, wqes, 1);
}
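
/* Build the CONNECT request.  IPv4 uses CONNECT1 + CONNECT3 (2 KWQEs);
 * IPv6 inserts CONNECT2 with the upper address words for a total of 3.
 */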
static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}
static int cnic_cm_close_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_close_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_abort_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_reset_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
			  u32 l5_cid, struct cnic_sock **csk, void *context)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_sock *csk1;

	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return -EINVAL;

	csk1 = &cp->csk_tbl[l5_cid];
	if (atomic_read(&csk1->ref_count))
		return -EAGAIN;

	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
		return -EBUSY;

	csk1->dev = dev;
	csk1->cid = cid;
	csk1->l5_cid = l5_cid;
	csk1->ulp_type = ulp_type;
	csk1->context = context;

	csk1->ka_timeout = DEF_KA_TIMEOUT;
	csk1->ka_interval = DEF_KA_INTERVAL;
	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
	csk1->tos = DEF_TOS;
	csk1->ttl = DEF_TTL;
	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
	csk1->rcv_buf = DEF_RCV_BUF;
	csk1->snd_buf = DEF_SND_BUF;
	csk1->seed = DEF_SEED;

	*csk = csk1;
	return 0;
}

static void cnic_cm_cleanup(struct cnic_sock *csk)
{
	if (csk->src_port) {
		struct cnic_dev *dev = csk->dev;
		struct cnic_local *cp = dev->cnic_priv;

		cnic_free_id(&cp->csk_port_tbl, csk->src_port);
		csk->src_port = 0;
	}
}

static void cnic_close_conn(struct cnic_sock *csk)
{
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
		cnic_cm_upload_pg(csk);
		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	}
	cnic_cm_cleanup(csk);
}

static int cnic_cm_destroy(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	csk_hold(csk);
	clear_bit(SK_F_INUSE, &csk->flags);
	smp_mb__after_clear_bit();
	while (atomic_read(&csk->ref_count) != 1)
		msleep(1);
	cnic_cm_cleanup(csk);

	csk->flags = 0;
	csk_put(csk);
	return 0;
}
static inline u16 cnic_get_vlan(struct net_device *dev,
				struct net_device **vlan_dev)
{
	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		*vlan_dev = vlan_dev_real_dev(dev);
		return vlan_dev_vlan_id(dev);
	}
	*vlan_dev = dev;
	return 0;
}

static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_INET)
	struct flowi fl;
	int err;
	struct rtable *rt;

	memset(&fl, 0, sizeof(fl));
	fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;

	err = ip_route_output_key(&init_net, &rt, &fl);
	if (!err)
		*dst = &rt->u.dst;
	return err;
#else
	return -ENETUNREACH;
#endif
}

static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
	if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
		fl.oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl);
	if (*dst)
		return 0;
#endif

	return -ENETUNREACH;
}
static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
					   int ulp_type)
{
	struct cnic_dev *dev = NULL;
	struct dst_entry *dst;
	struct net_device *netdev = NULL;
	int err = -ENETUNREACH;

	if (dst_addr->sin_family == AF_INET)
		err = cnic_get_v4_route(dst_addr, &dst);
	else if (dst_addr->sin_family == AF_INET6) {
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *) dst_addr;

		err = cnic_get_v6_route(dst_addr6, &dst);
	} else
		return NULL;

	if (err)
		return NULL;

	if (!dst->dev)
		goto done;

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);

done:
	dst_release(dst);
	if (dev)
		cnic_put(dev);
	return dev;
}

static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}
static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	int is_v6, err, rc = -ENETUNREACH;
	struct dst_entry *dst;
	struct net_device *realdev;
	u32 local_port;

	if (saddr->local.v6.sin6_family == AF_INET6 &&
	    saddr->remote.v6.sin6_family == AF_INET6)
		is_v6 = 1;
	else if (saddr->local.v4.sin_family == AF_INET &&
		 saddr->remote.v4.sin_family == AF_INET)
		is_v6 = 0;
	else
		return -EINVAL;

	clear_bit(SK_F_IPV6, &csk->flags);

	if (is_v6) {
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
		set_bit(SK_F_IPV6, &csk->flags);
		err = cnic_get_v6_route(&saddr->remote.v6, &dst);
		if (err)
			return err;

		if (!dst || dst->error || !dst->dev)
			goto err_out;

		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
		       sizeof(struct in6_addr));
		csk->dst_port = saddr->remote.v6.sin6_port;
		local_port = saddr->local.v6.sin6_port;
#else
		return rc;
#endif

	} else {
		err = cnic_get_v4_route(&saddr->remote.v4, &dst);
		if (err)
			return err;

		if (!dst || dst->error || !dst->dev)
			goto err_out;

		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
		csk->dst_port = saddr->remote.v4.sin_port;
		local_port = saddr->local.v4.sin_port;
	}

	csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
	if (realdev != dev->netdev)
		goto err_out;

	if (local_port >= CNIC_LOCAL_PORT_MIN &&
	    local_port < CNIC_LOCAL_PORT_MAX) {
		if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
			local_port = 0;
	} else
		local_port = 0;

	if (!local_port) {
		local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
		if (local_port == -1) {
			rc = -ENOMEM;
			goto err_out;
		}
	}
	csk->src_port = local_port;

	csk->mtu = dst_mtu(dst);
	rc = 0;

err_out:
	dst_release(dst);
	return rc;
}

static void cnic_init_csk_state(struct cnic_sock *csk)
{
	csk->state = 0;
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
	clear_bit(SK_F_CLOSING, &csk->flags);
}
static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	int err = 0;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
		return -EINVAL;

	cnic_init_csk_state(csk);

	err = cnic_get_route(csk, saddr);
	if (err)
		goto err_out;

	err = cnic_resolve_addr(csk, saddr);
	if (!err)
		return 0;

err_out:
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	return err;
}

static int cnic_cm_abort(struct cnic_sock *csk)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	u32 opcode;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_abort_prep(csk))
		return cnic_cm_abort_req(csk);

	/* Getting here means that we haven't started connect, or
	 * connect was not successful.
	 */

	csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
		opcode = csk->state;
	else
		opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
	cp->close_conn(csk, opcode);

	return 0;
}

static int cnic_cm_close(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_close_prep(csk)) {
		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		return cnic_cm_close_req(csk);
	}
	return 0;
}
static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
			   u8 opcode)
{
	struct cnic_ulp_ops *ulp_ops;
	int ulp_type = csk->ulp_type;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (ulp_ops) {
		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
			ulp_ops->cm_connect_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			ulp_ops->cm_close_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
			ulp_ops->cm_remote_abort(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
			ulp_ops->cm_abort_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
			ulp_ops->cm_remote_close(csk);
	}
	rcu_read_unlock();
}

static int cnic_cm_set_pg(struct cnic_sock *csk)
{
	if (cnic_offld_prep(csk)) {
		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			cnic_cm_update_pg(csk);
		else
			cnic_cm_offload_pg(csk);
	}
	return 0;
}
static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 l5_cid = kcqe->pg_host_opaque;
	u8 opcode = kcqe->op_code;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];

	csk_hold(csk);
	if (!cnic_in_use(csk))
		goto done;

	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		goto done;
	}
	csk->pg_cid = kcqe->pg_cid;
	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	cnic_cm_conn_req(csk);

done:
	csk_put(csk);
}

static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
	u8 opcode = l4kcqe->op_code;
	u32 l5_cid;
	struct cnic_sock *csk;

	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		cnic_cm_process_offld_pg(dev, l4kcqe);
		return;
	}

	l5_cid = l4kcqe->conn_id;
	if (opcode & 0x80)
		l5_cid = l4kcqe->cid;
	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return;

	csk = &cp->csk_tbl[l5_cid];
	csk_hold(csk);

	if (!cnic_in_use(csk)) {
		csk_put(csk);
		return;
	}

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
		if (l4kcqe->status == 0)
			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);

		smp_mb__before_clear_bit();
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
		if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
			csk->state = opcode;
		/* fall through */
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		cp->close_conn(csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
		cnic_cm_upcall(cp, csk, opcode);
		break;
	}
	csk_put(csk);
}

static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
	struct cnic_dev *dev = data;
	int i;

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);
}
static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes		= cnic_cm_indicate_kcqe,
};

static void cnic_cm_free_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	kfree(cp->csk_tbl);
	cp->csk_tbl = NULL;
	cnic_free_id_tbl(&cp->csk_port_tbl);
}

static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;

	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
			     CNIC_LOCAL_PORT_MIN)) {
		cnic_cm_free_mem(dev);
		return -ENOMEM;
	}
	return 0;
}
static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
	if ((opcode == csk->state) ||
	    (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
	     csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
			return 1;
	}
	return 0;
}

static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	clear_bit(SK_F_CONNECT_START, &csk->flags);
	if (cnic_ready_to_close(csk, opcode)) {
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, opcode);
	}
}

static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	get_random_bytes(&seed, 4);
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}
static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
	union l5cm_specific_data l5_data;
	u32 cmd = 0;
	int close_complete = 0;

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		if (cnic_ready_to_close(csk, opcode))
			cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
		break;
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
		break;
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		close_complete = 1;
		break;
	}
	if (cmd) {
		memset(&l5_data, 0, sizeof(l5_data));

		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
				    &l5_data);
	} else if (close_complete) {
		ctx->timestamp = jiffies;
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, csk->state);
	}
}

static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int func = CNIC_FUNC(cp);

	cnic_init_bnx2x_mac(dev);
	cnic_bnx2x_set_tcp_timestamp(dev, 1);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(func), 0);

	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func),
		DEF_MAX_DA_COUNT);

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(func), DEF_TTL);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(func), DEF_TOS);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(func), 2);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), DEF_SWS_TIMER);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(func),
		DEF_MAX_CWND);
	return 0;
}
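
/* Register the connection manager as the CNIC_ULP_L4 handler so that L4
 * KCQEs are routed to the cm_ulp_ops callbacks above.
 */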
static int cnic_cm_open(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int err;

	err = cnic_cm_alloc_mem(dev);
	if (err)
		return err;

	err = cp->start_cm(dev);

	if (err)
		goto err_out;

	dev->cm_create = cnic_cm_create;
	dev->cm_destroy = cnic_cm_destroy;
	dev->cm_connect = cnic_cm_connect;
	dev->cm_abort = cnic_cm_abort;
	dev->cm_close = cnic_cm_close;
	dev->cm_select_dev = cnic_cm_select_dev;

	cp->ulp_handle[CNIC_ULP_L4] = dev;
	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
	return 0;

err_out:
	cnic_cm_free_mem(dev);
	return err;
}

static int cnic_cm_shutdown(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	cp->stop_cm(dev);

	if (!cp->csk_tbl)
		return 0;

	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
		struct cnic_sock *csk = &cp->csk_tbl[i];

		clear_bit(SK_F_INUSE, &csk->flags);
		cnic_cm_cleanup(csk);
	}
	cnic_cm_free_mem(dev);

	return 0;
}
static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 cid_addr;
	int i;

	if (CHIP_NUM(cp) == CHIP_NUM_5709)
		return;

	cid_addr = GET_CID_ADDR(cid);

	for (i = 0; i < CTX_SIZE; i += 4)
		cnic_ctx_wr(dev, cid_addr, i, 0);
}

static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret = 0, i;
	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;

	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		return 0;

	for (i = 0; i < cp->ctx_blks; i++) {
		int j;
		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
		u32 val;

		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);

		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) cp->ctx_arr[i].mapping >> 32);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
static void cnic_free_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		cp->disable_int_sync(dev);
		tasklet_disable(&cp->cnic_irq_task);
		free_irq(ethdev->irq_arr[0].vector, dev);
	}
}
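
/* Set up the MSI-X status block and IRQ for bnx2 devices.  Before the
 * interrupt can be used, the KCQ completion producer index must read
 * back as 0; the code forces a coalesce and retries a few times to get
 * it there, and bails out if it never resets.
 */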
static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		int i = 0;
		int sblk_num = cp->status_blk_num;
		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);

		cp->bnx2_status_blk = cp->status_blk;
		cp->last_status_idx = cp->bnx2_status_blk->status_idx;
		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
			     (unsigned long) dev);
		err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
				  "cnic", dev);
		if (err) {
			tasklet_disable(&cp->cnic_irq_task);
			return err;
		}
		while (cp->bnx2_status_blk->status_completion_producer_index &&
		       i < 10) {
			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
				1 << (11 + sblk_num));
			udelay(10);
			i++;
			barrier();
		}
		if (cp->bnx2_status_blk->status_completion_producer_index) {
			cnic_free_irq(dev);
			goto failed;
		}

	} else {
		struct status_block *sblk = cp->status_blk;
		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
		int i = 0;

		while (sblk->status_completion_producer_index && i < 10) {
			CNIC_WR(dev, BNX2_HC_COMMAND,
				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
			udelay(10);
			i++;
			barrier();
		}
		if (sblk->status_completion_producer_index)
			goto failed;

	}
	return 0;

failed:
	printk(KERN_ERR PFX "%s: KCQ index not resetting to 0.\n",
	       dev->netdev->name);
	return -EBUSY;
}
static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
	synchronize_irq(ethdev->irq_arr[0].vector);
}
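
/* L2 ring setup for bnx2.  These rings back the interface exposed through
 * the UIO device (see cnic_uio_open()); the TX BDs all point at a small
 * driver-owned buffer.
 */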
static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 cid_addr, tx_cid, sb_id;
	u32 val, offset0, offset1, offset2, offset3;
	int i;
	struct tx_bd *txbd;
	dma_addr_t buf_map;
	struct status_block *s_blk = cp->status_blk;

	sb_id = cp->status_blk_num;
	tx_cid = 20;
	cnic_init_context(dev, tx_cid);
	cnic_init_context(dev, tx_cid + 1);
	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk;

		tx_cid = TX_TSS_CID + sb_id - 1;
		cnic_init_context(dev, tx_cid);
		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
			(TX_TSS_CID << 7));
		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
	}
	cp->tx_cons = *cp->tx_cons_ptr;

	cid_addr = GET_CID_ADDR(tx_cid);
	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;

		for (i = 0; i < PHY_CTX_SIZE; i += 4)
			cnic_ctx_wr(dev, cid_addr2, i, 0);

		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	cnic_ctx_wr(dev, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	cnic_ctx_wr(dev, cid_addr, offset1, val);

	txbd = (struct tx_bd *) cp->l2_ring;

	buf_map = cp->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) cp->l2_ring_map >> 32;
	cnic_ctx_wr(dev, cid_addr, offset2, val);
	txbd->tx_bd_haddr_hi = val;

	val = (u64) cp->l2_ring_map & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, offset3, val);
	txbd->tx_bd_haddr_lo = val;
}
static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 cid_addr, sb_id, val, coal_reg, coal_val;
	int i;
	struct rx_bd *rxbd;
	struct status_block *s_blk = cp->status_blk;

	sb_id = cp->status_blk_num;
	cnic_init_context(dev, 2);
	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
	coal_reg = BNX2_HC_COMMAND;
	coal_val = CNIC_RD(dev, coal_reg);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk;

		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
		coal_reg = BNX2_HC_COALESCE_NOW;
		coal_val = 1 << (11 + sb_id);
	}
	i = 0;
	while (!(*cp->rx_cons_ptr != 0) && i < 10) {
		CNIC_WR(dev, coal_reg, coal_val);
		udelay(10);
		i++;
		barrier();
	}
	cp->rx_cons = *cp->rx_cons_ptr;

	cid_addr = GET_CID_ADDR(2);
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	if (sb_id == 0)
		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
	else
		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

	rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->rx_bd_len = cp->l2_single_buf_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
	rxbd->rx_bd_haddr_hi = val;

	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
	rxbd->rx_bd_haddr_lo = val;

	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}
static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct kwqe *wqes[1], l2kwqe;

	memset(&l2kwqe, 0, sizeof(l2kwqe));
	wqes[0] = &l2kwqe;
	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
			       KWQE_OPCODE_SHIFT) | 2;
	dev->submit_kwqes(dev, wqes, 1);
}
static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk;
	u32 val;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (BCM_PAGE_BITS > 12)
		val |= (12 - 8) << 4;
	else
		val |= (BCM_PAGE_BITS - 8) << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;

	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq_prod_idx = 0;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq_info.pgtbl_map;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		printk(KERN_ERR PFX "%s: cnic_init_irq failed\n",
		       dev->netdev->name);
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}
static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}

static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
				  "cnic", dev);
		if (err)
			tasklet_disable(&cp->cnic_irq_task);
	}
	return err;
}
static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;
	int port = CNIC_PORT(cp);

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
					       HC_INDEX_C_ISCSI_EQ_CONS),
		 64 / 12);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						HC_INDEX_C_ISCSI_EQ_CONS), 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring;
	struct eth_context *context;
	struct regpair context_addr;
	dma_addr_t buf_map;
	int func = CNIC_FUNC(cp);
	int port = CNIC_PORT(cp);
	int i;
	int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
	u32 val;

	memset(txbd, 0, BCM_PAGE_SIZE);

	buf_map = cp->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data = (UNICAST_ADDRESS <<
			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	}
	context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 1, &context_addr);

	val = (u64) cp->l2_ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	context->xstorm_st_context.tx_bd_page_base_hi = val;

	val = (u64) cp->l2_ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	context->xstorm_st_context.tx_bd_page_base_lo = val;

	context->cstorm_st_context.sb_index_number =
		HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS;
	context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID;

	context->xstorm_st_context.statistics_data = (cli |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

	context->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
					CDU_REGION_NUMBER_XCM_AG,
					ETH_CONNECTION_TYPE);

	/* reset xstorm per client statistics */
	val = BAR_XSTRORM_INTMEM +
	      XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
		CNIC_WR(dev, val + i * 4, 0);

	cp->tx_cons_ptr =
		&cp->bnx2x_def_status_blk->c_def_status_block.index_values[
			HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS];
}
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring +
				BCM_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(cp->l2_ring + (2 * BCM_PAGE_SIZE));
	struct eth_context *context;
	struct regpair context_addr;
	int i;
	int port = CNIC_PORT(cp);
	int func = CNIC_FUNC(cp);
	int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
	u32 val;
	struct tstorm_eth_client_config tstorm_client = {0};

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}
	context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 0, &context_addr);

	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);

	context->ustorm_st_context.common.bd_page_base_hi = val;

	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);

	context->ustorm_st_context.common.bd_page_base_lo = val;

	context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_ISCSI_RX_SB_INDEX_NUM;
	context->ustorm_st_context.common.clientId = cli;
	context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID;
	context->ustorm_st_context.common.flags =
		USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS;
	context->ustorm_st_context.common.statistics_counter_id = cli;
	context->ustorm_st_context.common.mc_alignment_log_size = 0;
	context->ustorm_st_context.common.bd_buff_size =
						cp->l2_single_buf_size;

	context->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
					CDU_REGION_NUMBER_UCM_AG,
					ETH_CONNECTION_TYPE);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_CQE_PAGE_BASE_OFFSET(port, cli) + 4, val);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_CQE_PAGE_NEXT_OFFSET(port, cli) + 4, val);

	val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_CQE_PAGE_BASE_OFFSET(port, cli), val);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_CQE_PAGE_NEXT_OFFSET(port, cli), val);

	/* client tstorm info */
	tstorm_client.mtu = cp->l2_single_buf_size - 14;
	tstorm_client.config_flags =
			(TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE |
			TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE);
	tstorm_client.statistics_counter_id = cli;

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_CLIENT_CONFIG_OFFSET(port, cli),
		((u32 *)&tstorm_client)[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_CLIENT_CONFIG_OFFSET(port, cli) + 4,
		((u32 *)&tstorm_client)[1]);

	/* reset tstorm per client statistics */
	val = BAR_TSTRORM_INTMEM +
	      TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
		CNIC_WR(dev, val + i * 4, 0);

	/* reset ustorm per client statistics */
	val = BAR_USTRORM_INTMEM +
	      USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
	for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
		CNIC_WR(dev, val + i * 4, 0);

	cp->rx_cons_ptr =
		&cp->bnx2x_def_status_blk->u_def_status_block.index_values[
			HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS];
}
static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 base, addr, val;
	int port = CNIC_PORT(cp);

	dev->max_iscsi_conn = 0;
	base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
	if (base < 0xa0000 || base >= 0xc0000)
		return;

	addr = BNX2X_SHMEM_ADDR(base,
		dev_info.port_hw_config[port].iscsi_mac_upper);

	val = CNIC_RD(dev, addr);

	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	addr = BNX2X_SHMEM_ADDR(base,
		dev_info.port_hw_config[port].iscsi_mac_lower);

	val = CNIC_RD(dev, addr);

	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
	val = CNIC_RD(dev, addr);

	if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
		u16 val16;

		addr = BNX2X_SHMEM_ADDR(base,
				drv_lic_key[port].max_iscsi_init_conn);
		val16 = CNIC_RD16(dev, addr);

		if (val16)
			val16 ^= 0x1e1e;
		dev->max_iscsi_conn = val16;
	}
	if (BNX2X_CHIP_IS_E1H(cp->chip_id)) {
		int func = CNIC_FUNC(cp);

		addr = BNX2X_SHMEM_ADDR(base,
				mf_cfg.func_mf_config[func].e1hov_tag);
		val = CNIC_RD(dev, addr);
		val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
			addr = BNX2X_SHMEM_ADDR(base,
				mf_cfg.func_mf_config[func].config);
			val = CNIC_RD(dev, addr);
			val &= FUNC_MF_CFG_PROTOCOL_MASK;
			if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
				dev->max_iscsi_conn = 0;
		}
	}
}
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int func = CNIC_FUNC(cp), ret, i;
	int port = CNIC_PORT(cp);
	u16 eq_idx;
	u8 sb_id = cp->status_blk_num;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       BNX2X_ISCSI_START_CID);

	if (ret)
		return -ENOMEM;

	cp->kcq_io_addr = BAR_CSTRORM_INTMEM +
			  CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0);
	cp->kcq_prod_idx = 0;

	cnic_get_bnx2x_iscsi_info(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq_io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0),
		cp->kcq_info.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4,
		(u64) cp->kcq_info.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0),
		cp->kcq_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4,
		(u64) cp->kcq_info.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_EQ_SB_NUM_OFFSET(func, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(func, 0),
		 HC_INDEX_C_ISCSI_EQ_CONS);

	for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i),
			cp->conn_buf_info.pgtbl[2 * i]);
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i) + 4,
			cp->conn_buf_info.pgtbl[(2 * i) + 1]);
	}

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	cnic_setup_bnx2x_context(dev);

	eq_idx = CNIC_RD16(dev, BAR_CSTRORM_INTMEM +
			   CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
			   offsetof(struct cstorm_status_block_c,
				    index_values[HC_INDEX_C_ISCSI_EQ_CONS]));
	if (eq_idx != 0) {
		printk(KERN_ERR PFX "%s: EQ cons index %x != 0\n",
		       dev->netdev->name, eq_idx);
		return -EBUSY;
	}
	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	cnic_init_bnx2x_tx_ring(dev);
	cnic_init_bnx2x_rx_ring(dev);

	return 0;
}
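
/* Ring bring-up/tear-down entry points called when the UIO device is
 * opened or closed.  For bnx2x this also posts the CLIENT_SETUP / HALT /
 * CFC_DEL ramrods for the iSCSI L2 client.
 */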
static void cnic_init_rings(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;

		off = BAR_USTRORM_INTMEM +
			USTORM_RX_PRODS_OFFSET(CNIC_PORT(cp), cli);

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		cnic_init_bnx2x_tx_ring(dev);
		cnic_init_bnx2x_rx_ring(dev);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
		cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
	}
}

static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
		union l5cm_specific_data l5_data;

		cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
			BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
		msleep(10);

		memset(&l5_data, 0, sizeof(l5_data));
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
			BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE |
			(1 << SPE_HDR_COMMON_RAMROD_SHIFT), &l5_data);
		msleep(10);
	}
}
static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		printk(KERN_ERR PFX "%s: register_cnic failed\n",
		       dev->netdev->name);

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}
static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	cp->chip_id = ethdev->chip_id;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		printk(KERN_ERR PFX "%s: allocate resource failure\n",
		       dev->netdev->name);
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}
static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;
	int port = CNIC_PORT(cp);

	cnic_free_irq(dev);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
		  offsetof(struct cstorm_status_block_c,
			   index_values[HC_INDEX_C_ISCSI_EQ_CONS]),
		  0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->func, 0), 0);
	CNIC_WR16(dev, cp->kcq_io_addr, 0);
	cnic_free_resc(dev);
}

static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}
4320 static void cnic_free_dev(struct cnic_dev
*dev
)
4324 while ((atomic_read(&dev
->ref_count
) != 0) && i
< 10) {
4328 if (atomic_read(&dev
->ref_count
) != 0)
4329 printk(KERN_ERR PFX
"%s: Failed waiting for ref count to go"
4330 " to zero.\n", dev
->netdev
->name
);
4332 printk(KERN_INFO PFX
"Removed CNIC device: %s\n", dev
->netdev
->name
);
4333 dev_put(dev
->netdev
);
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
		       dev->name);
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->uio_dev = -1;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);

	return cdev;
}

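/* Probe a bnx2 netdev for CNIC support.  bnx2_cnic_probe is resolved
 * with symbol_get() so cnic does not carry a hard dependency on bnx2,
 * and early 5709/5709S silicon revisions (rev < 0x10) are rejected.
 */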
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
		u8 rev;

		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
		if (rev < 0x10) {
			pci_dev_put(pdev);
			goto cnic_err;
		}
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	cp->next_idx = cnic_bnx2_next_idx;
	cp->hw_idx = cnic_bnx2_hw_idx;

	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

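/* Probe a bnx2x netdev for CNIC support and wire up the bnx2x-specific
 * operation table.
 */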
static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2x_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2x_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;
	cp->next_idx = cnic_bnx2x_next_idx;
	cp->hw_idx = cnic_bnx2x_hw_idx;

	return cdev;
}

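/* Identify CNIC-capable netdevs by matching the ethtool driver name
 * against "bnx2" or "bnx2x"; newly created devices are added to the
 * global cnic_dev_list.
 */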
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

/**
 * netdev event handler
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	}
done:
	return NOTIFY_DONE;
}

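/* Registered in cnic_init(); every netdev event is funneled through
 * cnic_netdev_event() above.
 */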
static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

static void cnic_release(void)
{
	struct cnic_dev *dev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
}

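/* Module init/exit: register (and later unregister) the netdevice
 * notifier, and release any remaining CNIC devices on exit.
 */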
static int __init cnic_init(void)
{
	int rc = 0;

	printk(KERN_INFO "%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	return;
}

module_init(cnic_init);
module_exit(cnic_exit);