/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 */
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/string.h>
27 #include <linux/kernel.h>
28 #include <linux/semaphore.h>
29 #include <linux/iscsi_boot_sysfs.h>
31 #include <scsi/libiscsi.h>
32 #include <scsi/scsi_transport_iscsi.h>
33 #include <scsi/scsi_transport.h>
34 #include <scsi/scsi_cmnd.h>
35 #include <scsi/scsi_device.h>
36 #include <scsi/scsi_host.h>
37 #include <scsi/scsi.h>
/* Module parameters; exported via the module_param() calls below. */
static unsigned int be_iopoll_budget = 10;	/* blk-iopoll weight per poll cycle */
static unsigned int be_max_phys_size = 64;	/* max physically contiguous alloc, in KB */
static unsigned int enable_msix = 1;		/* prefer MSI-X interrupts when available */
static unsigned int gcrashmode;			/* crash/dump mode flag (statics are zeroed; no "= 0") */
static unsigned int num_hba;			/* number of HBAs probed so far */
48 MODULE_DEVICE_TABLE(pci
, beiscsi_pci_id_table
);
49 MODULE_DESCRIPTION(DRV_DESC
" " BUILD_STR
);
50 MODULE_AUTHOR("ServerEngines Corporation");
51 MODULE_LICENSE("GPL");
52 module_param(be_iopoll_budget
, int, 0);
53 module_param(enable_msix
, int, 0);
54 module_param(be_max_phys_size
, uint
, S_IRUGO
);
55 MODULE_PARM_DESC(be_max_phys_size
, "Maximum Size (In Kilobytes) of physically"
56 "contiguous memory that can be allocated."
59 static int beiscsi_slave_configure(struct scsi_device
*sdev
)
61 blk_queue_max_segment_size(sdev
->request_queue
, 65536);
65 static int beiscsi_eh_abort(struct scsi_cmnd
*sc
)
67 struct iscsi_cls_session
*cls_session
;
68 struct iscsi_task
*aborted_task
= (struct iscsi_task
*)sc
->SCp
.ptr
;
69 struct beiscsi_io_task
*aborted_io_task
;
70 struct iscsi_conn
*conn
;
71 struct beiscsi_conn
*beiscsi_conn
;
72 struct beiscsi_hba
*phba
;
73 struct iscsi_session
*session
;
74 struct invalidate_command_table
*inv_tbl
;
75 struct be_dma_mem nonemb_cmd
;
76 unsigned int cid
, tag
, num_invalidate
;
78 cls_session
= starget_to_session(scsi_target(sc
->device
));
79 session
= cls_session
->dd_data
;
81 spin_lock_bh(&session
->lock
);
82 if (!aborted_task
|| !aborted_task
->sc
) {
84 spin_unlock_bh(&session
->lock
);
88 aborted_io_task
= aborted_task
->dd_data
;
89 if (!aborted_io_task
->scsi_cmnd
) {
90 /* raced or invalid command */
91 spin_unlock_bh(&session
->lock
);
94 spin_unlock_bh(&session
->lock
);
95 conn
= aborted_task
->conn
;
96 beiscsi_conn
= conn
->dd_data
;
97 phba
= beiscsi_conn
->phba
;
100 cid
= beiscsi_conn
->beiscsi_conn_cid
;
101 inv_tbl
= phba
->inv_tbl
;
102 memset(inv_tbl
, 0x0, sizeof(*inv_tbl
));
104 inv_tbl
->icd
= aborted_io_task
->psgl_handle
->sgl_index
;
106 nonemb_cmd
.va
= pci_alloc_consistent(phba
->ctrl
.pdev
,
107 sizeof(struct invalidate_commands_params_in
),
109 if (nonemb_cmd
.va
== NULL
) {
111 "Failed to allocate memory for"
112 "mgmt_invalidate_icds\n");
115 nonemb_cmd
.size
= sizeof(struct invalidate_commands_params_in
);
117 tag
= mgmt_invalidate_icds(phba
, inv_tbl
, num_invalidate
,
120 shost_printk(KERN_WARNING
, phba
->shost
,
121 "mgmt_invalidate_icds could not be"
123 pci_free_consistent(phba
->ctrl
.pdev
, nonemb_cmd
.size
,
124 nonemb_cmd
.va
, nonemb_cmd
.dma
);
128 wait_event_interruptible(phba
->ctrl
.mcc_wait
[tag
],
129 phba
->ctrl
.mcc_numtag
[tag
]);
130 free_mcc_tag(&phba
->ctrl
, tag
);
132 pci_free_consistent(phba
->ctrl
.pdev
, nonemb_cmd
.size
,
133 nonemb_cmd
.va
, nonemb_cmd
.dma
);
134 return iscsi_eh_abort(sc
);
137 static int beiscsi_eh_device_reset(struct scsi_cmnd
*sc
)
139 struct iscsi_task
*abrt_task
;
140 struct beiscsi_io_task
*abrt_io_task
;
141 struct iscsi_conn
*conn
;
142 struct beiscsi_conn
*beiscsi_conn
;
143 struct beiscsi_hba
*phba
;
144 struct iscsi_session
*session
;
145 struct iscsi_cls_session
*cls_session
;
146 struct invalidate_command_table
*inv_tbl
;
147 struct be_dma_mem nonemb_cmd
;
148 unsigned int cid
, tag
, i
, num_invalidate
;
151 /* invalidate iocbs */
152 cls_session
= starget_to_session(scsi_target(sc
->device
));
153 session
= cls_session
->dd_data
;
154 spin_lock_bh(&session
->lock
);
155 if (!session
->leadconn
|| session
->state
!= ISCSI_STATE_LOGGED_IN
)
158 conn
= session
->leadconn
;
159 beiscsi_conn
= conn
->dd_data
;
160 phba
= beiscsi_conn
->phba
;
161 cid
= beiscsi_conn
->beiscsi_conn_cid
;
162 inv_tbl
= phba
->inv_tbl
;
163 memset(inv_tbl
, 0x0, sizeof(*inv_tbl
) * BE2_CMDS_PER_CXN
);
165 for (i
= 0; i
< conn
->session
->cmds_max
; i
++) {
166 abrt_task
= conn
->session
->cmds
[i
];
167 abrt_io_task
= abrt_task
->dd_data
;
168 if (!abrt_task
->sc
|| abrt_task
->state
== ISCSI_TASK_FREE
)
171 if (abrt_task
->sc
->device
->lun
!= abrt_task
->sc
->device
->lun
)
175 inv_tbl
->icd
= abrt_io_task
->psgl_handle
->sgl_index
;
179 spin_unlock_bh(&session
->lock
);
180 inv_tbl
= phba
->inv_tbl
;
182 nonemb_cmd
.va
= pci_alloc_consistent(phba
->ctrl
.pdev
,
183 sizeof(struct invalidate_commands_params_in
),
185 if (nonemb_cmd
.va
== NULL
) {
187 "Failed to allocate memory for"
188 "mgmt_invalidate_icds\n");
191 nonemb_cmd
.size
= sizeof(struct invalidate_commands_params_in
);
192 memset(nonemb_cmd
.va
, 0, nonemb_cmd
.size
);
193 tag
= mgmt_invalidate_icds(phba
, inv_tbl
, num_invalidate
,
196 shost_printk(KERN_WARNING
, phba
->shost
,
197 "mgmt_invalidate_icds could not be"
199 pci_free_consistent(phba
->ctrl
.pdev
, nonemb_cmd
.size
,
200 nonemb_cmd
.va
, nonemb_cmd
.dma
);
203 wait_event_interruptible(phba
->ctrl
.mcc_wait
[tag
],
204 phba
->ctrl
.mcc_numtag
[tag
]);
205 free_mcc_tag(&phba
->ctrl
, tag
);
207 pci_free_consistent(phba
->ctrl
.pdev
, nonemb_cmd
.size
,
208 nonemb_cmd
.va
, nonemb_cmd
.dma
);
209 return iscsi_eh_device_reset(sc
);
211 spin_unlock_bh(&session
->lock
);
215 static ssize_t
beiscsi_show_boot_tgt_info(void *data
, int type
, char *buf
)
217 struct beiscsi_hba
*phba
= data
;
222 case ISCSI_BOOT_TGT_NAME
:
223 rc
= sprintf(buf
, "%.*s\n",
224 (int)strlen(phba
->boot_sess
.target_name
),
225 (char *)&phba
->boot_sess
.target_name
);
227 case ISCSI_BOOT_TGT_IP_ADDR
:
228 if (phba
->boot_sess
.conn_list
[0].dest_ipaddr
.ip_type
== 0x1)
229 rc
= sprintf(buf
, "%pI4\n",
230 (char *)&phba
->boot_sess
.conn_list
[0].
231 dest_ipaddr
.ip_address
);
233 rc
= sprintf(str
, "%pI6\n",
234 (char *)&phba
->boot_sess
.conn_list
[0].
235 dest_ipaddr
.ip_address
);
237 case ISCSI_BOOT_TGT_PORT
:
238 rc
= sprintf(str
, "%d\n", phba
->boot_sess
.conn_list
[0].
242 case ISCSI_BOOT_TGT_CHAP_NAME
:
243 rc
= sprintf(str
, "%.*s\n",
244 phba
->boot_sess
.conn_list
[0].
245 negotiated_login_options
.auth_data
.chap
.
246 target_chap_name_length
,
247 (char *)&phba
->boot_sess
.conn_list
[0].
248 negotiated_login_options
.auth_data
.chap
.
251 case ISCSI_BOOT_TGT_CHAP_SECRET
:
252 rc
= sprintf(str
, "%.*s\n",
253 phba
->boot_sess
.conn_list
[0].
254 negotiated_login_options
.auth_data
.chap
.
255 target_secret_length
,
256 (char *)&phba
->boot_sess
.conn_list
[0].
257 negotiated_login_options
.auth_data
.chap
.
261 case ISCSI_BOOT_TGT_REV_CHAP_NAME
:
262 rc
= sprintf(str
, "%.*s\n",
263 phba
->boot_sess
.conn_list
[0].
264 negotiated_login_options
.auth_data
.chap
.
265 intr_chap_name_length
,
266 (char *)&phba
->boot_sess
.conn_list
[0].
267 negotiated_login_options
.auth_data
.chap
.
271 case ISCSI_BOOT_TGT_REV_CHAP_SECRET
:
272 rc
= sprintf(str
, "%.*s\n",
273 phba
->boot_sess
.conn_list
[0].
274 negotiated_login_options
.auth_data
.chap
.
276 (char *)&phba
->boot_sess
.conn_list
[0].
277 negotiated_login_options
.auth_data
.chap
.
280 case ISCSI_BOOT_TGT_FLAGS
:
281 rc
= sprintf(str
, "2\n");
283 case ISCSI_BOOT_TGT_NIC_ASSOC
:
284 rc
= sprintf(str
, "0\n");
293 static ssize_t
beiscsi_show_boot_ini_info(void *data
, int type
, char *buf
)
295 struct beiscsi_hba
*phba
= data
;
300 case ISCSI_BOOT_INI_INITIATOR_NAME
:
301 rc
= sprintf(str
, "%s\n", phba
->boot_sess
.initiator_iscsiname
);
310 static ssize_t
beiscsi_show_boot_eth_info(void *data
, int type
, char *buf
)
312 struct beiscsi_hba
*phba
= data
;
317 case ISCSI_BOOT_ETH_FLAGS
:
318 rc
= sprintf(str
, "2\n");
320 case ISCSI_BOOT_ETH_INDEX
:
321 rc
= sprintf(str
, "0\n");
323 case ISCSI_BOOT_ETH_MAC
:
324 rc
= beiscsi_get_macaddr(buf
, phba
);
326 SE_DEBUG(DBG_LVL_1
, "beiscsi_get_macaddr Failed\n");
/*
 * Sysfs attribute-visibility callback for iscsi_boot target kobjects.
 * NOTE(review): this extraction is missing the function's switch/return
 * scaffolding; the case labels below list the target attributes the driver
 * exposes — presumably all read-only (S_IRUGO) — TODO confirm against the
 * complete source.
 */
338 static mode_t
beiscsi_tgt_get_attr_visibility(void *data
, int type
)
343 case ISCSI_BOOT_TGT_NAME
:
344 case ISCSI_BOOT_TGT_IP_ADDR
:
345 case ISCSI_BOOT_TGT_PORT
:
346 case ISCSI_BOOT_TGT_CHAP_NAME
:
347 case ISCSI_BOOT_TGT_CHAP_SECRET
:
348 case ISCSI_BOOT_TGT_REV_CHAP_NAME
:
349 case ISCSI_BOOT_TGT_REV_CHAP_SECRET
:
350 case ISCSI_BOOT_TGT_NIC_ASSOC
:
351 case ISCSI_BOOT_TGT_FLAGS
:
/*
 * Sysfs attribute-visibility callback for the iscsi_boot initiator kobject.
 * NOTE(review): switch/return scaffolding is elided in this extraction;
 * only the initiator-name attribute is listed as visible.
 */
361 static mode_t
beiscsi_ini_get_attr_visibility(void *data
, int type
)
366 case ISCSI_BOOT_INI_INITIATOR_NAME
:
/*
 * Sysfs attribute-visibility callback for iscsi_boot ethernet kobjects.
 * NOTE(review): switch/return scaffolding is elided in this extraction;
 * the flags, MAC and index attributes are the ones listed as visible.
 */
377 static mode_t
beiscsi_eth_get_attr_visibility(void *data
, int type
)
382 case ISCSI_BOOT_ETH_FLAGS
:
383 case ISCSI_BOOT_ETH_MAC
:
384 case ISCSI_BOOT_ETH_INDEX
:
394 static int beiscsi_setup_boot_info(struct beiscsi_hba
*phba
)
396 struct iscsi_boot_kobj
*boot_kobj
;
398 phba
->boot_kset
= iscsi_boot_create_host_kset(phba
->shost
->host_no
);
399 if (!phba
->boot_kset
)
402 /* get boot info using mgmt cmd */
403 boot_kobj
= iscsi_boot_create_target(phba
->boot_kset
, 0, phba
,
404 beiscsi_show_boot_tgt_info
,
405 beiscsi_tgt_get_attr_visibility
);
409 boot_kobj
= iscsi_boot_create_initiator(phba
->boot_kset
, 0, phba
,
410 beiscsi_show_boot_ini_info
,
411 beiscsi_ini_get_attr_visibility
);
415 boot_kobj
= iscsi_boot_create_ethernet(phba
->boot_kset
, 0, phba
,
416 beiscsi_show_boot_eth_info
,
417 beiscsi_eth_get_attr_visibility
);
424 iscsi_boot_destroy_kset(phba
->boot_kset
);
428 /*------------------- PCI Driver operations and data ----------------- */
429 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table
) = {
430 { PCI_DEVICE(BE_VENDOR_ID
, BE_DEVICE_ID1
) },
431 { PCI_DEVICE(BE_VENDOR_ID
, BE_DEVICE_ID2
) },
432 { PCI_DEVICE(BE_VENDOR_ID
, OC_DEVICE_ID1
) },
433 { PCI_DEVICE(BE_VENDOR_ID
, OC_DEVICE_ID2
) },
434 { PCI_DEVICE(BE_VENDOR_ID
, OC_DEVICE_ID3
) },
437 MODULE_DEVICE_TABLE(pci
, beiscsi_pci_id_table
);
439 static struct scsi_host_template beiscsi_sht
= {
440 .module
= THIS_MODULE
,
441 .name
= "ServerEngines 10Gbe open-iscsi Initiator Driver",
442 .proc_name
= DRV_NAME
,
443 .queuecommand
= iscsi_queuecommand
,
444 .change_queue_depth
= iscsi_change_queue_depth
,
445 .slave_configure
= beiscsi_slave_configure
,
446 .target_alloc
= iscsi_target_alloc
,
447 .eh_abort_handler
= beiscsi_eh_abort
,
448 .eh_device_reset_handler
= beiscsi_eh_device_reset
,
449 .eh_target_reset_handler
= iscsi_eh_session_reset
,
450 .sg_tablesize
= BEISCSI_SGLIST_ELEMENTS
,
451 .can_queue
= BE2_IO_DEPTH
,
453 .max_sectors
= BEISCSI_MAX_SECTORS
,
454 .cmd_per_lun
= BEISCSI_CMD_PER_LUN
,
455 .use_clustering
= ENABLE_CLUSTERING
,
458 static struct scsi_transport_template
*beiscsi_scsi_transport
;
460 static struct beiscsi_hba
*beiscsi_hba_alloc(struct pci_dev
*pcidev
)
462 struct beiscsi_hba
*phba
;
463 struct Scsi_Host
*shost
;
465 shost
= iscsi_host_alloc(&beiscsi_sht
, sizeof(*phba
), 0);
467 dev_err(&pcidev
->dev
, "beiscsi_hba_alloc -"
468 "iscsi_host_alloc failed\n");
471 shost
->dma_boundary
= pcidev
->dma_mask
;
472 shost
->max_id
= BE2_MAX_SESSIONS
;
473 shost
->max_channel
= 0;
474 shost
->max_cmd_len
= BEISCSI_MAX_CMD_LEN
;
475 shost
->max_lun
= BEISCSI_NUM_MAX_LUN
;
476 shost
->transportt
= beiscsi_scsi_transport
;
477 phba
= iscsi_host_priv(shost
);
478 memset(phba
, 0, sizeof(*phba
));
480 phba
->pcidev
= pci_dev_get(pcidev
);
481 pci_set_drvdata(pcidev
, phba
);
483 if (iscsi_host_add(shost
, &phba
->pcidev
->dev
))
486 if (beiscsi_setup_boot_info(phba
))
488 * log error but continue, because we may not be using
491 shost_printk(KERN_ERR
, phba
->shost
, "Could not set up "
497 pci_dev_put(phba
->pcidev
);
498 iscsi_host_free(phba
->shost
);
502 static void beiscsi_unmap_pci_function(struct beiscsi_hba
*phba
)
505 iounmap(phba
->csr_va
);
509 iounmap(phba
->db_va
);
513 iounmap(phba
->pci_va
);
518 static int beiscsi_map_pci_bars(struct beiscsi_hba
*phba
,
519 struct pci_dev
*pcidev
)
524 addr
= ioremap_nocache(pci_resource_start(pcidev
, 2),
525 pci_resource_len(pcidev
, 2));
528 phba
->ctrl
.csr
= addr
;
530 phba
->csr_pa
.u
.a64
.address
= pci_resource_start(pcidev
, 2);
532 addr
= ioremap_nocache(pci_resource_start(pcidev
, 4), 128 * 1024);
535 phba
->ctrl
.db
= addr
;
537 phba
->db_pa
.u
.a64
.address
= pci_resource_start(pcidev
, 4);
539 if (phba
->generation
== BE_GEN2
)
544 addr
= ioremap_nocache(pci_resource_start(pcidev
, pcicfg_reg
),
545 pci_resource_len(pcidev
, pcicfg_reg
));
549 phba
->ctrl
.pcicfg
= addr
;
551 phba
->pci_pa
.u
.a64
.address
= pci_resource_start(pcidev
, pcicfg_reg
);
555 beiscsi_unmap_pci_function(phba
);
559 static int beiscsi_enable_pci(struct pci_dev
*pcidev
)
563 ret
= pci_enable_device(pcidev
);
565 dev_err(&pcidev
->dev
, "beiscsi_enable_pci - enable device "
566 "failed. Returning -ENODEV\n");
570 pci_set_master(pcidev
);
571 if (pci_set_consistent_dma_mask(pcidev
, DMA_BIT_MASK(64))) {
572 ret
= pci_set_consistent_dma_mask(pcidev
, DMA_BIT_MASK(32));
574 dev_err(&pcidev
->dev
, "Could not set PCI DMA Mask\n");
575 pci_disable_device(pcidev
);
582 static int be_ctrl_init(struct beiscsi_hba
*phba
, struct pci_dev
*pdev
)
584 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
585 struct be_dma_mem
*mbox_mem_alloc
= &ctrl
->mbox_mem_alloced
;
586 struct be_dma_mem
*mbox_mem_align
= &ctrl
->mbox_mem
;
590 status
= beiscsi_map_pci_bars(phba
, pdev
);
593 mbox_mem_alloc
->size
= sizeof(struct be_mcc_mailbox
) + 16;
594 mbox_mem_alloc
->va
= pci_alloc_consistent(pdev
,
595 mbox_mem_alloc
->size
,
596 &mbox_mem_alloc
->dma
);
597 if (!mbox_mem_alloc
->va
) {
598 beiscsi_unmap_pci_function(phba
);
603 mbox_mem_align
->size
= sizeof(struct be_mcc_mailbox
);
604 mbox_mem_align
->va
= PTR_ALIGN(mbox_mem_alloc
->va
, 16);
605 mbox_mem_align
->dma
= PTR_ALIGN(mbox_mem_alloc
->dma
, 16);
606 memset(mbox_mem_align
->va
, 0, sizeof(struct be_mcc_mailbox
));
607 spin_lock_init(&ctrl
->mbox_lock
);
608 spin_lock_init(&phba
->ctrl
.mcc_lock
);
609 spin_lock_init(&phba
->ctrl
.mcc_cq_lock
);
614 static void beiscsi_get_params(struct beiscsi_hba
*phba
)
616 phba
->params
.ios_per_ctrl
= (phba
->fw_config
.iscsi_icd_count
617 - (phba
->fw_config
.iscsi_cid_count
620 phba
->params
.cxns_per_ctrl
= phba
->fw_config
.iscsi_cid_count
;
621 phba
->params
.asyncpdus_per_ctrl
= phba
->fw_config
.iscsi_cid_count
* 2;
622 phba
->params
.icds_per_ctrl
= phba
->fw_config
.iscsi_icd_count
;
623 phba
->params
.num_sge_per_io
= BE2_SGE
;
624 phba
->params
.defpdu_hdr_sz
= BE2_DEFPDU_HDR_SZ
;
625 phba
->params
.defpdu_data_sz
= BE2_DEFPDU_DATA_SZ
;
626 phba
->params
.eq_timer
= 64;
627 phba
->params
.num_eq_entries
=
628 (((BE2_CMDS_PER_CXN
* 2 + phba
->fw_config
.iscsi_cid_count
* 2
629 + BE2_TMFS
) / 512) + 1) * 512;
630 phba
->params
.num_eq_entries
= (phba
->params
.num_eq_entries
< 1024)
631 ? 1024 : phba
->params
.num_eq_entries
;
632 SE_DEBUG(DBG_LVL_8
, "phba->params.num_eq_entries=%d\n",
633 phba
->params
.num_eq_entries
);
634 phba
->params
.num_cq_entries
=
635 (((BE2_CMDS_PER_CXN
* 2 + phba
->fw_config
.iscsi_cid_count
* 2
636 + BE2_TMFS
) / 512) + 1) * 512;
637 phba
->params
.wrbs_per_cxn
= 256;
640 static void hwi_ring_eq_db(struct beiscsi_hba
*phba
,
641 unsigned int id
, unsigned int clr_interrupt
,
642 unsigned int num_processed
,
643 unsigned char rearm
, unsigned char event
)
646 val
|= id
& DB_EQ_RING_ID_MASK
;
648 val
|= 1 << DB_EQ_REARM_SHIFT
;
650 val
|= 1 << DB_EQ_CLR_SHIFT
;
652 val
|= 1 << DB_EQ_EVNT_SHIFT
;
653 val
|= num_processed
<< DB_EQ_NUM_POPPED_SHIFT
;
654 iowrite32(val
, phba
->db_va
+ DB_EQ_OFFSET
);
658 * be_isr_mcc - The isr routine of the driver.
660 * @dev_id: Pointer to host adapter structure
662 static irqreturn_t
be_isr_mcc(int irq
, void *dev_id
)
664 struct beiscsi_hba
*phba
;
665 struct be_eq_entry
*eqe
= NULL
;
666 struct be_queue_info
*eq
;
667 struct be_queue_info
*mcc
;
668 unsigned int num_eq_processed
;
669 struct be_eq_obj
*pbe_eq
;
675 mcc
= &phba
->ctrl
.mcc_obj
.cq
;
676 eqe
= queue_tail_node(eq
);
678 SE_DEBUG(DBG_LVL_1
, "eqe is NULL\n");
680 num_eq_processed
= 0;
682 while (eqe
->dw
[offsetof(struct amap_eq_entry
, valid
) / 32]
684 if (((eqe
->dw
[offsetof(struct amap_eq_entry
,
686 EQE_RESID_MASK
) >> 16) == mcc
->id
) {
687 spin_lock_irqsave(&phba
->isr_lock
, flags
);
688 phba
->todo_mcc_cq
= 1;
689 spin_unlock_irqrestore(&phba
->isr_lock
, flags
);
691 AMAP_SET_BITS(struct amap_eq_entry
, valid
, eqe
, 0);
693 eqe
= queue_tail_node(eq
);
696 if (phba
->todo_mcc_cq
)
697 queue_work(phba
->wq
, &phba
->work_cqs
);
698 if (num_eq_processed
)
699 hwi_ring_eq_db(phba
, eq
->id
, 1, num_eq_processed
, 1, 1);
705 * be_isr_msix - The isr routine of the driver.
707 * @dev_id: Pointer to host adapter structure
709 static irqreturn_t
be_isr_msix(int irq
, void *dev_id
)
711 struct beiscsi_hba
*phba
;
712 struct be_eq_entry
*eqe
= NULL
;
713 struct be_queue_info
*eq
;
714 struct be_queue_info
*cq
;
715 unsigned int num_eq_processed
;
716 struct be_eq_obj
*pbe_eq
;
722 eqe
= queue_tail_node(eq
);
724 SE_DEBUG(DBG_LVL_1
, "eqe is NULL\n");
727 num_eq_processed
= 0;
728 if (blk_iopoll_enabled
) {
729 while (eqe
->dw
[offsetof(struct amap_eq_entry
, valid
) / 32]
731 if (!blk_iopoll_sched_prep(&pbe_eq
->iopoll
))
732 blk_iopoll_sched(&pbe_eq
->iopoll
);
734 AMAP_SET_BITS(struct amap_eq_entry
, valid
, eqe
, 0);
736 eqe
= queue_tail_node(eq
);
739 if (num_eq_processed
)
740 hwi_ring_eq_db(phba
, eq
->id
, 1, num_eq_processed
, 0, 1);
744 while (eqe
->dw
[offsetof(struct amap_eq_entry
, valid
) / 32]
746 spin_lock_irqsave(&phba
->isr_lock
, flags
);
748 spin_unlock_irqrestore(&phba
->isr_lock
, flags
);
749 AMAP_SET_BITS(struct amap_eq_entry
, valid
, eqe
, 0);
751 eqe
= queue_tail_node(eq
);
755 queue_work(phba
->wq
, &phba
->work_cqs
);
757 if (num_eq_processed
)
758 hwi_ring_eq_db(phba
, eq
->id
, 1, num_eq_processed
, 1, 1);
765 * be_isr - The isr routine of the driver.
767 * @dev_id: Pointer to host adapter structure
769 static irqreturn_t
be_isr(int irq
, void *dev_id
)
771 struct beiscsi_hba
*phba
;
772 struct hwi_controller
*phwi_ctrlr
;
773 struct hwi_context_memory
*phwi_context
;
774 struct be_eq_entry
*eqe
= NULL
;
775 struct be_queue_info
*eq
;
776 struct be_queue_info
*cq
;
777 struct be_queue_info
*mcc
;
778 unsigned long flags
, index
;
779 unsigned int num_mcceq_processed
, num_ioeq_processed
;
780 struct be_ctrl_info
*ctrl
;
781 struct be_eq_obj
*pbe_eq
;
786 isr
= ioread32(ctrl
->csr
+ CEV_ISR0_OFFSET
+
787 (PCI_FUNC(ctrl
->pdev
->devfn
) * CEV_ISR_SIZE
));
791 phwi_ctrlr
= phba
->phwi_ctrlr
;
792 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
793 pbe_eq
= &phwi_context
->be_eq
[0];
795 eq
= &phwi_context
->be_eq
[0].q
;
796 mcc
= &phba
->ctrl
.mcc_obj
.cq
;
798 eqe
= queue_tail_node(eq
);
800 SE_DEBUG(DBG_LVL_1
, "eqe is NULL\n");
802 num_ioeq_processed
= 0;
803 num_mcceq_processed
= 0;
804 if (blk_iopoll_enabled
) {
805 while (eqe
->dw
[offsetof(struct amap_eq_entry
, valid
) / 32]
807 if (((eqe
->dw
[offsetof(struct amap_eq_entry
,
809 EQE_RESID_MASK
) >> 16) == mcc
->id
) {
810 spin_lock_irqsave(&phba
->isr_lock
, flags
);
811 phba
->todo_mcc_cq
= 1;
812 spin_unlock_irqrestore(&phba
->isr_lock
, flags
);
813 num_mcceq_processed
++;
815 if (!blk_iopoll_sched_prep(&pbe_eq
->iopoll
))
816 blk_iopoll_sched(&pbe_eq
->iopoll
);
817 num_ioeq_processed
++;
819 AMAP_SET_BITS(struct amap_eq_entry
, valid
, eqe
, 0);
821 eqe
= queue_tail_node(eq
);
823 if (num_ioeq_processed
|| num_mcceq_processed
) {
824 if (phba
->todo_mcc_cq
)
825 queue_work(phba
->wq
, &phba
->work_cqs
);
827 if ((num_mcceq_processed
) && (!num_ioeq_processed
))
828 hwi_ring_eq_db(phba
, eq
->id
, 0,
829 (num_ioeq_processed
+
830 num_mcceq_processed
) , 1, 1);
832 hwi_ring_eq_db(phba
, eq
->id
, 0,
833 (num_ioeq_processed
+
834 num_mcceq_processed
), 0, 1);
840 cq
= &phwi_context
->be_cq
[0];
841 while (eqe
->dw
[offsetof(struct amap_eq_entry
, valid
) / 32]
844 if (((eqe
->dw
[offsetof(struct amap_eq_entry
,
846 EQE_RESID_MASK
) >> 16) != cq
->id
) {
847 spin_lock_irqsave(&phba
->isr_lock
, flags
);
848 phba
->todo_mcc_cq
= 1;
849 spin_unlock_irqrestore(&phba
->isr_lock
, flags
);
851 spin_lock_irqsave(&phba
->isr_lock
, flags
);
853 spin_unlock_irqrestore(&phba
->isr_lock
, flags
);
855 AMAP_SET_BITS(struct amap_eq_entry
, valid
, eqe
, 0);
857 eqe
= queue_tail_node(eq
);
858 num_ioeq_processed
++;
860 if (phba
->todo_cq
|| phba
->todo_mcc_cq
)
861 queue_work(phba
->wq
, &phba
->work_cqs
);
863 if (num_ioeq_processed
) {
864 hwi_ring_eq_db(phba
, eq
->id
, 0,
865 num_ioeq_processed
, 1, 1);
872 static int beiscsi_init_irqs(struct beiscsi_hba
*phba
)
874 struct pci_dev
*pcidev
= phba
->pcidev
;
875 struct hwi_controller
*phwi_ctrlr
;
876 struct hwi_context_memory
*phwi_context
;
877 int ret
, msix_vec
, i
, j
;
880 phwi_ctrlr
= phba
->phwi_ctrlr
;
881 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
883 if (phba
->msix_enabled
) {
884 for (i
= 0; i
< phba
->num_cpus
; i
++) {
885 sprintf(desc
, "beiscsi_msix_%04x", i
);
886 msix_vec
= phba
->msix_entries
[i
].vector
;
887 ret
= request_irq(msix_vec
, be_isr_msix
, 0, desc
,
888 &phwi_context
->be_eq
[i
]);
890 shost_printk(KERN_ERR
, phba
->shost
,
891 "beiscsi_init_irqs-Failed to"
892 "register msix for i = %d\n", i
);
898 msix_vec
= phba
->msix_entries
[i
].vector
;
899 ret
= request_irq(msix_vec
, be_isr_mcc
, 0, "beiscsi_msix_mcc",
900 &phwi_context
->be_eq
[i
]);
902 shost_printk(KERN_ERR
, phba
->shost
, "beiscsi_init_irqs-"
903 "Failed to register beiscsi_msix_mcc\n");
909 ret
= request_irq(pcidev
->irq
, be_isr
, IRQF_SHARED
,
912 shost_printk(KERN_ERR
, phba
->shost
, "beiscsi_init_irqs-"
913 "Failed to register irq\\n");
919 for (j
= i
- 1; j
== 0; j
++)
920 free_irq(msix_vec
, &phwi_context
->be_eq
[j
]);
924 static void hwi_ring_cq_db(struct beiscsi_hba
*phba
,
925 unsigned int id
, unsigned int num_processed
,
926 unsigned char rearm
, unsigned char event
)
929 val
|= id
& DB_CQ_RING_ID_MASK
;
931 val
|= 1 << DB_CQ_REARM_SHIFT
;
932 val
|= num_processed
<< DB_CQ_NUM_POPPED_SHIFT
;
933 iowrite32(val
, phba
->db_va
+ DB_CQ_OFFSET
);
937 beiscsi_process_async_pdu(struct beiscsi_conn
*beiscsi_conn
,
938 struct beiscsi_hba
*phba
,
940 struct pdu_base
*ppdu
,
941 unsigned long pdu_len
,
942 void *pbuffer
, unsigned long buf_len
)
944 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
945 struct iscsi_session
*session
= conn
->session
;
946 struct iscsi_task
*task
;
947 struct beiscsi_io_task
*io_task
;
948 struct iscsi_hdr
*login_hdr
;
950 switch (ppdu
->dw
[offsetof(struct amap_pdu_base
, opcode
) / 32] &
951 PDUBASE_OPCODE_MASK
) {
952 case ISCSI_OP_NOOP_IN
:
956 case ISCSI_OP_ASYNC_EVENT
:
958 case ISCSI_OP_REJECT
:
960 WARN_ON(!(buf_len
== 48));
961 SE_DEBUG(DBG_LVL_1
, "In ISCSI_OP_REJECT\n");
963 case ISCSI_OP_LOGIN_RSP
:
964 case ISCSI_OP_TEXT_RSP
:
965 task
= conn
->login_task
;
966 io_task
= task
->dd_data
;
967 login_hdr
= (struct iscsi_hdr
*)ppdu
;
968 login_hdr
->itt
= io_task
->libiscsi_itt
;
971 shost_printk(KERN_WARNING
, phba
->shost
,
972 "Unrecognized opcode 0x%x in async msg\n",
974 dw
[offsetof(struct amap_pdu_base
, opcode
) / 32]
975 & PDUBASE_OPCODE_MASK
));
979 spin_lock_bh(&session
->lock
);
980 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)ppdu
, pbuffer
, buf_len
);
981 spin_unlock_bh(&session
->lock
);
985 static struct sgl_handle
*alloc_io_sgl_handle(struct beiscsi_hba
*phba
)
987 struct sgl_handle
*psgl_handle
;
989 if (phba
->io_sgl_hndl_avbl
) {
991 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
992 phba
->io_sgl_alloc_index
);
993 psgl_handle
= phba
->io_sgl_hndl_base
[phba
->
995 phba
->io_sgl_hndl_base
[phba
->io_sgl_alloc_index
] = NULL
;
996 phba
->io_sgl_hndl_avbl
--;
997 if (phba
->io_sgl_alloc_index
== (phba
->params
.
999 phba
->io_sgl_alloc_index
= 0;
1001 phba
->io_sgl_alloc_index
++;
1008 free_io_sgl_handle(struct beiscsi_hba
*phba
, struct sgl_handle
*psgl_handle
)
1010 SE_DEBUG(DBG_LVL_8
, "In free_,io_sgl_free_index=%d\n",
1011 phba
->io_sgl_free_index
);
1012 if (phba
->io_sgl_hndl_base
[phba
->io_sgl_free_index
]) {
1014 * this can happen if clean_task is called on a task that
1015 * failed in xmit_task or alloc_pdu.
1018 "Double Free in IO SGL io_sgl_free_index=%d,"
1019 "value there=%p\n", phba
->io_sgl_free_index
,
1020 phba
->io_sgl_hndl_base
[phba
->io_sgl_free_index
]);
1023 phba
->io_sgl_hndl_base
[phba
->io_sgl_free_index
] = psgl_handle
;
1024 phba
->io_sgl_hndl_avbl
++;
1025 if (phba
->io_sgl_free_index
== (phba
->params
.ios_per_ctrl
- 1))
1026 phba
->io_sgl_free_index
= 0;
1028 phba
->io_sgl_free_index
++;
1032 * alloc_wrb_handle - To allocate a wrb handle
1033 * @phba: The hba pointer
1034 * @cid: The cid to use for allocation
1036 * This happens under session_lock until submission to chip
1038 struct wrb_handle
*alloc_wrb_handle(struct beiscsi_hba
*phba
, unsigned int cid
)
1040 struct hwi_wrb_context
*pwrb_context
;
1041 struct hwi_controller
*phwi_ctrlr
;
1042 struct wrb_handle
*pwrb_handle
, *pwrb_handle_tmp
;
1044 phwi_ctrlr
= phba
->phwi_ctrlr
;
1045 pwrb_context
= &phwi_ctrlr
->wrb_context
[cid
];
1046 if (pwrb_context
->wrb_handles_available
>= 2) {
1047 pwrb_handle
= pwrb_context
->pwrb_handle_base
[
1048 pwrb_context
->alloc_index
];
1049 pwrb_context
->wrb_handles_available
--;
1050 if (pwrb_context
->alloc_index
==
1051 (phba
->params
.wrbs_per_cxn
- 1))
1052 pwrb_context
->alloc_index
= 0;
1054 pwrb_context
->alloc_index
++;
1055 pwrb_handle_tmp
= pwrb_context
->pwrb_handle_base
[
1056 pwrb_context
->alloc_index
];
1057 pwrb_handle
->nxt_wrb_index
= pwrb_handle_tmp
->wrb_index
;
1064 * free_wrb_handle - To free the wrb handle back to pool
1065 * @phba: The hba pointer
1066 * @pwrb_context: The context to free from
1067 * @pwrb_handle: The wrb_handle to free
1069 * This happens under session_lock until submission to chip
1072 free_wrb_handle(struct beiscsi_hba
*phba
, struct hwi_wrb_context
*pwrb_context
,
1073 struct wrb_handle
*pwrb_handle
)
1075 pwrb_context
->pwrb_handle_base
[pwrb_context
->free_index
] = pwrb_handle
;
1076 pwrb_context
->wrb_handles_available
++;
1077 if (pwrb_context
->free_index
== (phba
->params
.wrbs_per_cxn
- 1))
1078 pwrb_context
->free_index
= 0;
1080 pwrb_context
->free_index
++;
1083 "FREE WRB: pwrb_handle=%p free_index=0x%x"
1084 "wrb_handles_available=%d\n",
1085 pwrb_handle
, pwrb_context
->free_index
,
1086 pwrb_context
->wrb_handles_available
);
1089 static struct sgl_handle
*alloc_mgmt_sgl_handle(struct beiscsi_hba
*phba
)
1091 struct sgl_handle
*psgl_handle
;
1093 if (phba
->eh_sgl_hndl_avbl
) {
1094 psgl_handle
= phba
->eh_sgl_hndl_base
[phba
->eh_sgl_alloc_index
];
1095 phba
->eh_sgl_hndl_base
[phba
->eh_sgl_alloc_index
] = NULL
;
1096 SE_DEBUG(DBG_LVL_8
, "mgmt_sgl_alloc_index=%d=0x%x\n",
1097 phba
->eh_sgl_alloc_index
, phba
->eh_sgl_alloc_index
);
1098 phba
->eh_sgl_hndl_avbl
--;
1099 if (phba
->eh_sgl_alloc_index
==
1100 (phba
->params
.icds_per_ctrl
- phba
->params
.ios_per_ctrl
-
1102 phba
->eh_sgl_alloc_index
= 0;
1104 phba
->eh_sgl_alloc_index
++;
1111 free_mgmt_sgl_handle(struct beiscsi_hba
*phba
, struct sgl_handle
*psgl_handle
)
1114 SE_DEBUG(DBG_LVL_8
, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d\n",
1115 phba
->eh_sgl_free_index
);
1116 if (phba
->eh_sgl_hndl_base
[phba
->eh_sgl_free_index
]) {
1118 * this can happen if clean_task is called on a task that
1119 * failed in xmit_task or alloc_pdu.
1122 "Double Free in eh SGL ,eh_sgl_free_index=%d\n",
1123 phba
->eh_sgl_free_index
);
1126 phba
->eh_sgl_hndl_base
[phba
->eh_sgl_free_index
] = psgl_handle
;
1127 phba
->eh_sgl_hndl_avbl
++;
1128 if (phba
->eh_sgl_free_index
==
1129 (phba
->params
.icds_per_ctrl
- phba
->params
.ios_per_ctrl
- 1))
1130 phba
->eh_sgl_free_index
= 0;
1132 phba
->eh_sgl_free_index
++;
1136 be_complete_io(struct beiscsi_conn
*beiscsi_conn
,
1137 struct iscsi_task
*task
, struct sol_cqe
*psol
)
1139 struct beiscsi_io_task
*io_task
= task
->dd_data
;
1140 struct be_status_bhs
*sts_bhs
=
1141 (struct be_status_bhs
*)io_task
->cmd_bhs
;
1142 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
1143 unsigned int sense_len
;
1144 unsigned char *sense
;
1145 u32 resid
= 0, exp_cmdsn
, max_cmdsn
;
1146 u8 rsp
, status
, flags
;
1149 dw
[offsetof(struct amap_sol_cqe
, i_exp_cmd_sn
) / 32]
1150 & SOL_EXP_CMD_SN_MASK
);
1151 max_cmdsn
= ((psol
->
1152 dw
[offsetof(struct amap_sol_cqe
, i_exp_cmd_sn
) / 32]
1153 & SOL_EXP_CMD_SN_MASK
) +
1154 ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_cmd_wnd
)
1155 / 32] & SOL_CMD_WND_MASK
) >> 24) - 1);
1156 rsp
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_resp
) / 32]
1157 & SOL_RESP_MASK
) >> 16);
1158 status
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_sts
) / 32]
1159 & SOL_STS_MASK
) >> 8);
1160 flags
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_flags
) / 32]
1161 & SOL_FLAGS_MASK
) >> 24) | 0x80;
1163 task
->sc
->result
= (DID_OK
<< 16) | status
;
1164 if (rsp
!= ISCSI_STATUS_CMD_COMPLETED
) {
1165 task
->sc
->result
= DID_ERROR
<< 16;
1169 /* bidi not initially supported */
1170 if (flags
& (ISCSI_FLAG_CMD_UNDERFLOW
| ISCSI_FLAG_CMD_OVERFLOW
)) {
1171 resid
= (psol
->dw
[offsetof(struct amap_sol_cqe
, i_res_cnt
) /
1172 32] & SOL_RES_CNT_MASK
);
1174 if (!status
&& (flags
& ISCSI_FLAG_CMD_OVERFLOW
))
1175 task
->sc
->result
= DID_ERROR
<< 16;
1177 if (flags
& ISCSI_FLAG_CMD_UNDERFLOW
) {
1178 scsi_set_resid(task
->sc
, resid
);
1179 if (!status
&& (scsi_bufflen(task
->sc
) - resid
<
1180 task
->sc
->underflow
))
1181 task
->sc
->result
= DID_ERROR
<< 16;
1185 if (status
== SAM_STAT_CHECK_CONDITION
) {
1186 unsigned short *slen
= (unsigned short *)sts_bhs
->sense_info
;
1187 sense
= sts_bhs
->sense_info
+ sizeof(unsigned short);
1188 sense_len
= cpu_to_be16(*slen
);
1189 memcpy(task
->sc
->sense_buffer
, sense
,
1190 min_t(u16
, sense_len
, SCSI_SENSE_BUFFERSIZE
));
1193 if (io_task
->cmd_bhs
->iscsi_hdr
.flags
& ISCSI_FLAG_CMD_READ
) {
1194 if (psol
->dw
[offsetof(struct amap_sol_cqe
, i_res_cnt
) / 32]
1196 conn
->rxdata_octets
+= (psol
->
1197 dw
[offsetof(struct amap_sol_cqe
, i_res_cnt
) / 32]
1198 & SOL_RES_CNT_MASK
);
1201 scsi_dma_unmap(io_task
->scsi_cmnd
);
1202 iscsi_complete_scsi_task(task
, exp_cmdsn
, max_cmdsn
);
1206 be_complete_logout(struct beiscsi_conn
*beiscsi_conn
,
1207 struct iscsi_task
*task
, struct sol_cqe
*psol
)
1209 struct iscsi_logout_rsp
*hdr
;
1210 struct beiscsi_io_task
*io_task
= task
->dd_data
;
1211 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
1213 hdr
= (struct iscsi_logout_rsp
*)task
->hdr
;
1214 hdr
->opcode
= ISCSI_OP_LOGOUT_RSP
;
1217 hdr
->flags
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_flags
) / 32]
1218 & SOL_FLAGS_MASK
) >> 24) | 0x80;
1219 hdr
->response
= (psol
->dw
[offsetof(struct amap_sol_cqe
, i_resp
) /
1220 32] & SOL_RESP_MASK
);
1221 hdr
->exp_cmdsn
= cpu_to_be32(psol
->
1222 dw
[offsetof(struct amap_sol_cqe
, i_exp_cmd_sn
) / 32]
1223 & SOL_EXP_CMD_SN_MASK
);
1224 hdr
->max_cmdsn
= be32_to_cpu((psol
->
1225 dw
[offsetof(struct amap_sol_cqe
, i_exp_cmd_sn
) / 32]
1226 & SOL_EXP_CMD_SN_MASK
) +
1227 ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_cmd_wnd
)
1228 / 32] & SOL_CMD_WND_MASK
) >> 24) - 1);
1229 hdr
->dlength
[0] = 0;
1230 hdr
->dlength
[1] = 0;
1231 hdr
->dlength
[2] = 0;
1233 hdr
->itt
= io_task
->libiscsi_itt
;
1234 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)hdr
, NULL
, 0);
1238 be_complete_tmf(struct beiscsi_conn
*beiscsi_conn
,
1239 struct iscsi_task
*task
, struct sol_cqe
*psol
)
1241 struct iscsi_tm_rsp
*hdr
;
1242 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
1243 struct beiscsi_io_task
*io_task
= task
->dd_data
;
1245 hdr
= (struct iscsi_tm_rsp
*)task
->hdr
;
1246 hdr
->opcode
= ISCSI_OP_SCSI_TMFUNC_RSP
;
1247 hdr
->flags
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_flags
) / 32]
1248 & SOL_FLAGS_MASK
) >> 24) | 0x80;
1249 hdr
->response
= (psol
->dw
[offsetof(struct amap_sol_cqe
, i_resp
) /
1250 32] & SOL_RESP_MASK
);
1251 hdr
->exp_cmdsn
= cpu_to_be32(psol
->dw
[offsetof(struct amap_sol_cqe
,
1252 i_exp_cmd_sn
) / 32] & SOL_EXP_CMD_SN_MASK
);
1253 hdr
->max_cmdsn
= be32_to_cpu((psol
->dw
[offsetof(struct amap_sol_cqe
,
1254 i_exp_cmd_sn
) / 32] & SOL_EXP_CMD_SN_MASK
) +
1255 ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_cmd_wnd
)
1256 / 32] & SOL_CMD_WND_MASK
) >> 24) - 1);
1257 hdr
->itt
= io_task
->libiscsi_itt
;
1258 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)hdr
, NULL
, 0);
1262 hwi_complete_drvr_msgs(struct beiscsi_conn
*beiscsi_conn
,
1263 struct beiscsi_hba
*phba
, struct sol_cqe
*psol
)
1265 struct hwi_wrb_context
*pwrb_context
;
1266 struct wrb_handle
*pwrb_handle
= NULL
;
1267 struct hwi_controller
*phwi_ctrlr
;
1268 struct iscsi_task
*task
;
1269 struct beiscsi_io_task
*io_task
;
1270 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
1271 struct iscsi_session
*session
= conn
->session
;
1273 phwi_ctrlr
= phba
->phwi_ctrlr
;
1274 pwrb_context
= &phwi_ctrlr
->wrb_context
[((psol
->
1275 dw
[offsetof(struct amap_sol_cqe
, cid
) / 32] &
1276 SOL_CID_MASK
) >> 6) -
1277 phba
->fw_config
.iscsi_cid_start
];
1278 pwrb_handle
= pwrb_context
->pwrb_handle_basestd
[((psol
->
1279 dw
[offsetof(struct amap_sol_cqe
, wrb_index
) /
1280 32] & SOL_WRB_INDEX_MASK
) >> 16)];
1281 task
= pwrb_handle
->pio_handle
;
1283 io_task
= task
->dd_data
;
1284 spin_lock(&phba
->mgmt_sgl_lock
);
1285 free_mgmt_sgl_handle(phba
, io_task
->psgl_handle
);
1286 spin_unlock(&phba
->mgmt_sgl_lock
);
1287 spin_lock_bh(&session
->lock
);
1288 free_wrb_handle(phba
, pwrb_context
, pwrb_handle
);
1289 spin_unlock_bh(&session
->lock
);
1293 be_complete_nopin_resp(struct beiscsi_conn
*beiscsi_conn
,
1294 struct iscsi_task
*task
, struct sol_cqe
*psol
)
1296 struct iscsi_nopin
*hdr
;
1297 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
1298 struct beiscsi_io_task
*io_task
= task
->dd_data
;
1300 hdr
= (struct iscsi_nopin
*)task
->hdr
;
1301 hdr
->flags
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_flags
) / 32]
1302 & SOL_FLAGS_MASK
) >> 24) | 0x80;
1303 hdr
->exp_cmdsn
= cpu_to_be32(psol
->dw
[offsetof(struct amap_sol_cqe
,
1304 i_exp_cmd_sn
) / 32] & SOL_EXP_CMD_SN_MASK
);
1305 hdr
->max_cmdsn
= be32_to_cpu((psol
->dw
[offsetof(struct amap_sol_cqe
,
1306 i_exp_cmd_sn
) / 32] & SOL_EXP_CMD_SN_MASK
) +
1307 ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_cmd_wnd
)
1308 / 32] & SOL_CMD_WND_MASK
) >> 24) - 1);
1309 hdr
->opcode
= ISCSI_OP_NOOP_IN
;
1310 hdr
->itt
= io_task
->libiscsi_itt
;
1311 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)hdr
, NULL
, 0);
1314 static void hwi_complete_cmd(struct beiscsi_conn
*beiscsi_conn
,
1315 struct beiscsi_hba
*phba
, struct sol_cqe
*psol
)
1317 struct hwi_wrb_context
*pwrb_context
;
1318 struct wrb_handle
*pwrb_handle
;
1319 struct iscsi_wrb
*pwrb
= NULL
;
1320 struct hwi_controller
*phwi_ctrlr
;
1321 struct iscsi_task
*task
;
1323 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
1324 struct iscsi_session
*session
= conn
->session
;
1326 phwi_ctrlr
= phba
->phwi_ctrlr
;
1327 pwrb_context
= &phwi_ctrlr
->wrb_context
[((psol
->dw
[offsetof
1328 (struct amap_sol_cqe
, cid
) / 32]
1329 & SOL_CID_MASK
) >> 6) -
1330 phba
->fw_config
.iscsi_cid_start
];
1331 pwrb_handle
= pwrb_context
->pwrb_handle_basestd
[((psol
->
1332 dw
[offsetof(struct amap_sol_cqe
, wrb_index
) /
1333 32] & SOL_WRB_INDEX_MASK
) >> 16)];
1334 task
= pwrb_handle
->pio_handle
;
1335 pwrb
= pwrb_handle
->pwrb
;
1336 type
= (pwrb
->dw
[offsetof(struct amap_iscsi_wrb
, type
) / 32] &
1337 WRB_TYPE_MASK
) >> 28;
1339 spin_lock_bh(&session
->lock
);
1342 case HWH_TYPE_IO_RD
:
1343 if ((task
->hdr
->opcode
& ISCSI_OPCODE_MASK
) ==
1345 be_complete_nopin_resp(beiscsi_conn
, task
, psol
);
1347 be_complete_io(beiscsi_conn
, task
, psol
);
1350 case HWH_TYPE_LOGOUT
:
1351 if ((task
->hdr
->opcode
& ISCSI_OPCODE_MASK
) == ISCSI_OP_LOGOUT
)
1352 be_complete_logout(beiscsi_conn
, task
, psol
);
1354 be_complete_tmf(beiscsi_conn
, task
, psol
);
1358 case HWH_TYPE_LOGIN
:
1360 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
1361 "- Solicited path\n");
1365 be_complete_nopin_resp(beiscsi_conn
, task
, psol
);
1369 shost_printk(KERN_WARNING
, phba
->shost
,
1370 "In hwi_complete_cmd, unknown type = %d"
1371 "wrb_index 0x%x CID 0x%x\n", type
,
1372 ((psol
->dw
[offsetof(struct amap_iscsi_wrb
,
1373 type
) / 32] & SOL_WRB_INDEX_MASK
) >> 16),
1374 ((psol
->dw
[offsetof(struct amap_sol_cqe
,
1375 cid
) / 32] & SOL_CID_MASK
) >> 6));
1379 spin_unlock_bh(&session
->lock
);
1382 static struct list_head
*hwi_get_async_busy_list(struct hwi_async_pdu_context
1383 *pasync_ctx
, unsigned int is_header
,
1384 unsigned int host_write_ptr
)
1387 return &pasync_ctx
->async_entry
[host_write_ptr
].
1390 return &pasync_ctx
->async_entry
[host_write_ptr
].data_busy_list
;
1393 static struct async_pdu_handle
*
1394 hwi_get_async_handle(struct beiscsi_hba
*phba
,
1395 struct beiscsi_conn
*beiscsi_conn
,
1396 struct hwi_async_pdu_context
*pasync_ctx
,
1397 struct i_t_dpdu_cqe
*pdpdu_cqe
, unsigned int *pcq_index
)
1399 struct be_bus_address phys_addr
;
1400 struct list_head
*pbusy_list
;
1401 struct async_pdu_handle
*pasync_handle
= NULL
;
1403 unsigned char buffer_index
= -1;
1404 unsigned char is_header
= 0;
1406 phys_addr
.u
.a32
.address_lo
=
1407 pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
, db_addr_lo
) / 32] -
1408 ((pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
, dpl
) / 32]
1409 & PDUCQE_DPL_MASK
) >> 16);
1410 phys_addr
.u
.a32
.address_hi
=
1411 pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
, db_addr_hi
) / 32];
1413 phys_addr
.u
.a64
.address
=
1414 *((unsigned long long *)(&phys_addr
.u
.a64
.address
));
1416 switch (pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
, code
) / 32]
1417 & PDUCQE_CODE_MASK
) {
1418 case UNSOL_HDR_NOTIFY
:
1421 pbusy_list
= hwi_get_async_busy_list(pasync_ctx
, 1,
1422 (pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
,
1423 index
) / 32] & PDUCQE_INDEX_MASK
));
1425 buffer_len
= (unsigned int)(phys_addr
.u
.a64
.address
-
1426 pasync_ctx
->async_header
.pa_base
.u
.a64
.address
);
1428 buffer_index
= buffer_len
/
1429 pasync_ctx
->async_header
.buffer_size
;
1432 case UNSOL_DATA_NOTIFY
:
1433 pbusy_list
= hwi_get_async_busy_list(pasync_ctx
, 0, (pdpdu_cqe
->
1434 dw
[offsetof(struct amap_i_t_dpdu_cqe
,
1435 index
) / 32] & PDUCQE_INDEX_MASK
));
1436 buffer_len
= (unsigned long)(phys_addr
.u
.a64
.address
-
1437 pasync_ctx
->async_data
.pa_base
.u
.
1439 buffer_index
= buffer_len
/ pasync_ctx
->async_data
.buffer_size
;
1443 shost_printk(KERN_WARNING
, phba
->shost
,
1444 "Unexpected code=%d\n",
1445 pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
,
1446 code
) / 32] & PDUCQE_CODE_MASK
);
1450 WARN_ON(!(buffer_index
<= pasync_ctx
->async_data
.num_entries
));
1451 WARN_ON(list_empty(pbusy_list
));
1452 list_for_each_entry(pasync_handle
, pbusy_list
, link
) {
1453 WARN_ON(pasync_handle
->consumed
);
1454 if (pasync_handle
->index
== buffer_index
)
1458 WARN_ON(!pasync_handle
);
1460 pasync_handle
->cri
= (unsigned short)beiscsi_conn
->beiscsi_conn_cid
-
1461 phba
->fw_config
.iscsi_cid_start
;
1462 pasync_handle
->is_header
= is_header
;
1463 pasync_handle
->buffer_len
= ((pdpdu_cqe
->
1464 dw
[offsetof(struct amap_i_t_dpdu_cqe
, dpl
) / 32]
1465 & PDUCQE_DPL_MASK
) >> 16);
1467 *pcq_index
= (pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
,
1468 index
) / 32] & PDUCQE_INDEX_MASK
);
1469 return pasync_handle
;
1473 hwi_update_async_writables(struct hwi_async_pdu_context
*pasync_ctx
,
1474 unsigned int is_header
, unsigned int cq_index
)
1476 struct list_head
*pbusy_list
;
1477 struct async_pdu_handle
*pasync_handle
;
1478 unsigned int num_entries
, writables
= 0;
1479 unsigned int *pep_read_ptr
, *pwritables
;
1483 pep_read_ptr
= &pasync_ctx
->async_header
.ep_read_ptr
;
1484 pwritables
= &pasync_ctx
->async_header
.writables
;
1485 num_entries
= pasync_ctx
->async_header
.num_entries
;
1487 pep_read_ptr
= &pasync_ctx
->async_data
.ep_read_ptr
;
1488 pwritables
= &pasync_ctx
->async_data
.writables
;
1489 num_entries
= pasync_ctx
->async_data
.num_entries
;
1492 while ((*pep_read_ptr
) != cq_index
) {
1494 *pep_read_ptr
= (*pep_read_ptr
) % num_entries
;
1496 pbusy_list
= hwi_get_async_busy_list(pasync_ctx
, is_header
,
1499 WARN_ON(list_empty(pbusy_list
));
1501 if (!list_empty(pbusy_list
)) {
1502 pasync_handle
= list_entry(pbusy_list
->next
,
1503 struct async_pdu_handle
,
1505 WARN_ON(!pasync_handle
);
1506 pasync_handle
->consumed
= 1;
1514 "Duplicate notification received - index 0x%x!!\n",
1519 *pwritables
= *pwritables
+ writables
;
1523 static unsigned int hwi_free_async_msg(struct beiscsi_hba
*phba
,
1526 struct hwi_controller
*phwi_ctrlr
;
1527 struct hwi_async_pdu_context
*pasync_ctx
;
1528 struct async_pdu_handle
*pasync_handle
, *tmp_handle
;
1529 struct list_head
*plist
;
1532 phwi_ctrlr
= phba
->phwi_ctrlr
;
1533 pasync_ctx
= HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr
);
1535 plist
= &pasync_ctx
->async_entry
[cri
].wait_queue
.list
;
1537 list_for_each_entry_safe(pasync_handle
, tmp_handle
, plist
, link
) {
1538 list_del(&pasync_handle
->link
);
1541 list_add_tail(&pasync_handle
->link
,
1542 &pasync_ctx
->async_header
.free_list
);
1543 pasync_ctx
->async_header
.free_entries
++;
1546 list_add_tail(&pasync_handle
->link
,
1547 &pasync_ctx
->async_data
.free_list
);
1548 pasync_ctx
->async_data
.free_entries
++;
1553 INIT_LIST_HEAD(&pasync_ctx
->async_entry
[cri
].wait_queue
.list
);
1554 pasync_ctx
->async_entry
[cri
].wait_queue
.hdr_received
= 0;
1555 pasync_ctx
->async_entry
[cri
].wait_queue
.bytes_received
= 0;
1559 static struct phys_addr
*
1560 hwi_get_ring_address(struct hwi_async_pdu_context
*pasync_ctx
,
1561 unsigned int is_header
, unsigned int host_write_ptr
)
1563 struct phys_addr
*pasync_sge
= NULL
;
1566 pasync_sge
= pasync_ctx
->async_header
.ring_base
;
1568 pasync_sge
= pasync_ctx
->async_data
.ring_base
;
1570 return pasync_sge
+ host_write_ptr
;
1573 static void hwi_post_async_buffers(struct beiscsi_hba
*phba
,
1574 unsigned int is_header
)
1576 struct hwi_controller
*phwi_ctrlr
;
1577 struct hwi_async_pdu_context
*pasync_ctx
;
1578 struct async_pdu_handle
*pasync_handle
;
1579 struct list_head
*pfree_link
, *pbusy_list
;
1580 struct phys_addr
*pasync_sge
;
1581 unsigned int ring_id
, num_entries
;
1582 unsigned int host_write_num
;
1583 unsigned int writables
;
1587 phwi_ctrlr
= phba
->phwi_ctrlr
;
1588 pasync_ctx
= HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr
);
1591 num_entries
= pasync_ctx
->async_header
.num_entries
;
1592 writables
= min(pasync_ctx
->async_header
.writables
,
1593 pasync_ctx
->async_header
.free_entries
);
1594 pfree_link
= pasync_ctx
->async_header
.free_list
.next
;
1595 host_write_num
= pasync_ctx
->async_header
.host_write_ptr
;
1596 ring_id
= phwi_ctrlr
->default_pdu_hdr
.id
;
1598 num_entries
= pasync_ctx
->async_data
.num_entries
;
1599 writables
= min(pasync_ctx
->async_data
.writables
,
1600 pasync_ctx
->async_data
.free_entries
);
1601 pfree_link
= pasync_ctx
->async_data
.free_list
.next
;
1602 host_write_num
= pasync_ctx
->async_data
.host_write_ptr
;
1603 ring_id
= phwi_ctrlr
->default_pdu_data
.id
;
1606 writables
= (writables
/ 8) * 8;
1608 for (i
= 0; i
< writables
; i
++) {
1610 hwi_get_async_busy_list(pasync_ctx
, is_header
,
1613 list_entry(pfree_link
, struct async_pdu_handle
,
1615 WARN_ON(!pasync_handle
);
1616 pasync_handle
->consumed
= 0;
1618 pfree_link
= pfree_link
->next
;
1620 pasync_sge
= hwi_get_ring_address(pasync_ctx
,
1621 is_header
, host_write_num
);
1623 pasync_sge
->hi
= pasync_handle
->pa
.u
.a32
.address_lo
;
1624 pasync_sge
->lo
= pasync_handle
->pa
.u
.a32
.address_hi
;
1626 list_move(&pasync_handle
->link
, pbusy_list
);
1629 host_write_num
= host_write_num
% num_entries
;
1633 pasync_ctx
->async_header
.host_write_ptr
=
1635 pasync_ctx
->async_header
.free_entries
-= writables
;
1636 pasync_ctx
->async_header
.writables
-= writables
;
1637 pasync_ctx
->async_header
.busy_entries
+= writables
;
1639 pasync_ctx
->async_data
.host_write_ptr
= host_write_num
;
1640 pasync_ctx
->async_data
.free_entries
-= writables
;
1641 pasync_ctx
->async_data
.writables
-= writables
;
1642 pasync_ctx
->async_data
.busy_entries
+= writables
;
1645 doorbell
|= ring_id
& DB_DEF_PDU_RING_ID_MASK
;
1646 doorbell
|= 1 << DB_DEF_PDU_REARM_SHIFT
;
1647 doorbell
|= 0 << DB_DEF_PDU_EVENT_SHIFT
;
1648 doorbell
|= (writables
& DB_DEF_PDU_CQPROC_MASK
)
1649 << DB_DEF_PDU_CQPROC_SHIFT
;
1651 iowrite32(doorbell
, phba
->db_va
+ DB_RXULP0_OFFSET
);
1655 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba
*phba
,
1656 struct beiscsi_conn
*beiscsi_conn
,
1657 struct i_t_dpdu_cqe
*pdpdu_cqe
)
1659 struct hwi_controller
*phwi_ctrlr
;
1660 struct hwi_async_pdu_context
*pasync_ctx
;
1661 struct async_pdu_handle
*pasync_handle
= NULL
;
1662 unsigned int cq_index
= -1;
1664 phwi_ctrlr
= phba
->phwi_ctrlr
;
1665 pasync_ctx
= HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr
);
1667 pasync_handle
= hwi_get_async_handle(phba
, beiscsi_conn
, pasync_ctx
,
1668 pdpdu_cqe
, &cq_index
);
1669 BUG_ON(pasync_handle
->is_header
!= 0);
1670 if (pasync_handle
->consumed
== 0)
1671 hwi_update_async_writables(pasync_ctx
, pasync_handle
->is_header
,
1674 hwi_free_async_msg(phba
, pasync_handle
->cri
);
1675 hwi_post_async_buffers(phba
, pasync_handle
->is_header
);
1679 hwi_fwd_async_msg(struct beiscsi_conn
*beiscsi_conn
,
1680 struct beiscsi_hba
*phba
,
1681 struct hwi_async_pdu_context
*pasync_ctx
, unsigned short cri
)
1683 struct list_head
*plist
;
1684 struct async_pdu_handle
*pasync_handle
;
1686 unsigned int hdr_len
= 0, buf_len
= 0;
1687 unsigned int status
, index
= 0, offset
= 0;
1688 void *pfirst_buffer
= NULL
;
1689 unsigned int num_buf
= 0;
1691 plist
= &pasync_ctx
->async_entry
[cri
].wait_queue
.list
;
1693 list_for_each_entry(pasync_handle
, plist
, link
) {
1695 phdr
= pasync_handle
->pbuffer
;
1696 hdr_len
= pasync_handle
->buffer_len
;
1698 buf_len
= pasync_handle
->buffer_len
;
1700 pfirst_buffer
= pasync_handle
->pbuffer
;
1703 memcpy(pfirst_buffer
+ offset
,
1704 pasync_handle
->pbuffer
, buf_len
);
1710 status
= beiscsi_process_async_pdu(beiscsi_conn
, phba
,
1711 (beiscsi_conn
->beiscsi_conn_cid
-
1712 phba
->fw_config
.iscsi_cid_start
),
1713 phdr
, hdr_len
, pfirst_buffer
,
1717 hwi_free_async_msg(phba
, cri
);
1722 hwi_gather_async_pdu(struct beiscsi_conn
*beiscsi_conn
,
1723 struct beiscsi_hba
*phba
,
1724 struct async_pdu_handle
*pasync_handle
)
1726 struct hwi_async_pdu_context
*pasync_ctx
;
1727 struct hwi_controller
*phwi_ctrlr
;
1728 unsigned int bytes_needed
= 0, status
= 0;
1729 unsigned short cri
= pasync_handle
->cri
;
1730 struct pdu_base
*ppdu
;
1732 phwi_ctrlr
= phba
->phwi_ctrlr
;
1733 pasync_ctx
= HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr
);
1735 list_del(&pasync_handle
->link
);
1736 if (pasync_handle
->is_header
) {
1737 pasync_ctx
->async_header
.busy_entries
--;
1738 if (pasync_ctx
->async_entry
[cri
].wait_queue
.hdr_received
) {
1739 hwi_free_async_msg(phba
, cri
);
1743 pasync_ctx
->async_entry
[cri
].wait_queue
.bytes_received
= 0;
1744 pasync_ctx
->async_entry
[cri
].wait_queue
.hdr_received
= 1;
1745 pasync_ctx
->async_entry
[cri
].wait_queue
.hdr_len
=
1746 (unsigned short)pasync_handle
->buffer_len
;
1747 list_add_tail(&pasync_handle
->link
,
1748 &pasync_ctx
->async_entry
[cri
].wait_queue
.list
);
1750 ppdu
= pasync_handle
->pbuffer
;
1751 bytes_needed
= ((((ppdu
->dw
[offsetof(struct amap_pdu_base
,
1752 data_len_hi
) / 32] & PDUBASE_DATALENHI_MASK
) << 8) &
1753 0xFFFF0000) | ((be16_to_cpu((ppdu
->
1754 dw
[offsetof(struct amap_pdu_base
, data_len_lo
) / 32]
1755 & PDUBASE_DATALENLO_MASK
) >> 16)) & 0x0000FFFF));
1758 pasync_ctx
->async_entry
[cri
].wait_queue
.bytes_needed
=
1761 if (bytes_needed
== 0)
1762 status
= hwi_fwd_async_msg(beiscsi_conn
, phba
,
1766 pasync_ctx
->async_data
.busy_entries
--;
1767 if (pasync_ctx
->async_entry
[cri
].wait_queue
.hdr_received
) {
1768 list_add_tail(&pasync_handle
->link
,
1769 &pasync_ctx
->async_entry
[cri
].wait_queue
.
1771 pasync_ctx
->async_entry
[cri
].wait_queue
.
1773 (unsigned short)pasync_handle
->buffer_len
;
1775 if (pasync_ctx
->async_entry
[cri
].wait_queue
.
1777 pasync_ctx
->async_entry
[cri
].wait_queue
.
1779 status
= hwi_fwd_async_msg(beiscsi_conn
, phba
,
1786 static void hwi_process_default_pdu_ring(struct beiscsi_conn
*beiscsi_conn
,
1787 struct beiscsi_hba
*phba
,
1788 struct i_t_dpdu_cqe
*pdpdu_cqe
)
1790 struct hwi_controller
*phwi_ctrlr
;
1791 struct hwi_async_pdu_context
*pasync_ctx
;
1792 struct async_pdu_handle
*pasync_handle
= NULL
;
1793 unsigned int cq_index
= -1;
1795 phwi_ctrlr
= phba
->phwi_ctrlr
;
1796 pasync_ctx
= HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr
);
1797 pasync_handle
= hwi_get_async_handle(phba
, beiscsi_conn
, pasync_ctx
,
1798 pdpdu_cqe
, &cq_index
);
1800 if (pasync_handle
->consumed
== 0)
1801 hwi_update_async_writables(pasync_ctx
, pasync_handle
->is_header
,
1803 hwi_gather_async_pdu(beiscsi_conn
, phba
, pasync_handle
);
1804 hwi_post_async_buffers(phba
, pasync_handle
->is_header
);
1807 static void beiscsi_process_mcc_isr(struct beiscsi_hba
*phba
)
1809 struct be_queue_info
*mcc_cq
;
1810 struct be_mcc_compl
*mcc_compl
;
1811 unsigned int num_processed
= 0;
1813 mcc_cq
= &phba
->ctrl
.mcc_obj
.cq
;
1814 mcc_compl
= queue_tail_node(mcc_cq
);
1815 mcc_compl
->flags
= le32_to_cpu(mcc_compl
->flags
);
1816 while (mcc_compl
->flags
& CQE_FLAGS_VALID_MASK
) {
1818 if (num_processed
>= 32) {
1819 hwi_ring_cq_db(phba
, mcc_cq
->id
,
1820 num_processed
, 0, 0);
1823 if (mcc_compl
->flags
& CQE_FLAGS_ASYNC_MASK
) {
1824 /* Interpret flags as an async trailer */
1825 if (is_link_state_evt(mcc_compl
->flags
))
1826 /* Interpret compl as a async link evt */
1827 beiscsi_async_link_state_process(phba
,
1828 (struct be_async_event_link_state
*) mcc_compl
);
1831 " Unsupported Async Event, flags"
1832 " = 0x%08x\n", mcc_compl
->flags
);
1833 } else if (mcc_compl
->flags
& CQE_FLAGS_COMPLETED_MASK
) {
1834 be_mcc_compl_process_isr(&phba
->ctrl
, mcc_compl
);
1835 atomic_dec(&phba
->ctrl
.mcc_obj
.q
.used
);
1838 mcc_compl
->flags
= 0;
1839 queue_tail_inc(mcc_cq
);
1840 mcc_compl
= queue_tail_node(mcc_cq
);
1841 mcc_compl
->flags
= le32_to_cpu(mcc_compl
->flags
);
1845 if (num_processed
> 0)
1846 hwi_ring_cq_db(phba
, mcc_cq
->id
, num_processed
, 1, 0);
1850 static unsigned int beiscsi_process_cq(struct be_eq_obj
*pbe_eq
)
1852 struct be_queue_info
*cq
;
1853 struct sol_cqe
*sol
;
1854 struct dmsg_cqe
*dmsg
;
1855 unsigned int num_processed
= 0;
1856 unsigned int tot_nump
= 0;
1857 struct beiscsi_conn
*beiscsi_conn
;
1858 struct beiscsi_endpoint
*beiscsi_ep
;
1859 struct iscsi_endpoint
*ep
;
1860 struct beiscsi_hba
*phba
;
1863 sol
= queue_tail_node(cq
);
1864 phba
= pbe_eq
->phba
;
1866 while (sol
->dw
[offsetof(struct amap_sol_cqe
, valid
) / 32] &
1868 be_dws_le_to_cpu(sol
, sizeof(struct sol_cqe
));
1870 ep
= phba
->ep_array
[(u32
) ((sol
->
1871 dw
[offsetof(struct amap_sol_cqe
, cid
) / 32] &
1872 SOL_CID_MASK
) >> 6) -
1873 phba
->fw_config
.iscsi_cid_start
];
1875 beiscsi_ep
= ep
->dd_data
;
1876 beiscsi_conn
= beiscsi_ep
->conn
;
1878 if (num_processed
>= 32) {
1879 hwi_ring_cq_db(phba
, cq
->id
,
1880 num_processed
, 0, 0);
1881 tot_nump
+= num_processed
;
1885 switch ((u32
) sol
->dw
[offsetof(struct amap_sol_cqe
, code
) /
1886 32] & CQE_CODE_MASK
) {
1887 case SOL_CMD_COMPLETE
:
1888 hwi_complete_cmd(beiscsi_conn
, phba
, sol
);
1890 case DRIVERMSG_NOTIFY
:
1891 SE_DEBUG(DBG_LVL_8
, "Received DRIVERMSG_NOTIFY\n");
1892 dmsg
= (struct dmsg_cqe
*)sol
;
1893 hwi_complete_drvr_msgs(beiscsi_conn
, phba
, sol
);
1895 case UNSOL_HDR_NOTIFY
:
1896 SE_DEBUG(DBG_LVL_8
, "Received UNSOL_HDR_ NOTIFY\n");
1897 hwi_process_default_pdu_ring(beiscsi_conn
, phba
,
1898 (struct i_t_dpdu_cqe
*)sol
);
1900 case UNSOL_DATA_NOTIFY
:
1901 SE_DEBUG(DBG_LVL_8
, "Received UNSOL_DATA_NOTIFY\n");
1902 hwi_process_default_pdu_ring(beiscsi_conn
, phba
,
1903 (struct i_t_dpdu_cqe
*)sol
);
1905 case CXN_INVALIDATE_INDEX_NOTIFY
:
1906 case CMD_INVALIDATED_NOTIFY
:
1907 case CXN_INVALIDATE_NOTIFY
:
1909 "Ignoring CQ Error notification for cmd/cxn"
1912 case SOL_CMD_KILLED_DATA_DIGEST_ERR
:
1913 case CMD_KILLED_INVALID_STATSN_RCVD
:
1914 case CMD_KILLED_INVALID_R2T_RCVD
:
1915 case CMD_CXN_KILLED_LUN_INVALID
:
1916 case CMD_CXN_KILLED_ICD_INVALID
:
1917 case CMD_CXN_KILLED_ITT_INVALID
:
1918 case CMD_CXN_KILLED_SEQ_OUTOFORDER
:
1919 case CMD_CXN_KILLED_INVALID_DATASN_RCVD
:
1921 "CQ Error notification for cmd.. "
1922 "code %d cid 0x%x\n",
1923 sol
->dw
[offsetof(struct amap_sol_cqe
, code
) /
1924 32] & CQE_CODE_MASK
,
1925 (sol
->dw
[offsetof(struct amap_sol_cqe
, cid
) /
1926 32] & SOL_CID_MASK
));
1928 case UNSOL_DATA_DIGEST_ERROR_NOTIFY
:
1930 "Digest error on def pdu ring, dropping..\n");
1931 hwi_flush_default_pdu_buffer(phba
, beiscsi_conn
,
1932 (struct i_t_dpdu_cqe
*) sol
);
1934 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL
:
1935 case CXN_KILLED_BURST_LEN_MISMATCH
:
1936 case CXN_KILLED_AHS_RCVD
:
1937 case CXN_KILLED_HDR_DIGEST_ERR
:
1938 case CXN_KILLED_UNKNOWN_HDR
:
1939 case CXN_KILLED_STALE_ITT_TTT_RCVD
:
1940 case CXN_KILLED_INVALID_ITT_TTT_RCVD
:
1941 case CXN_KILLED_TIMED_OUT
:
1942 case CXN_KILLED_FIN_RCVD
:
1943 case CXN_KILLED_BAD_UNSOL_PDU_RCVD
:
1944 case CXN_KILLED_BAD_WRB_INDEX_ERROR
:
1945 case CXN_KILLED_OVER_RUN_RESIDUAL
:
1946 case CXN_KILLED_UNDER_RUN_RESIDUAL
:
1947 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN
:
1948 SE_DEBUG(DBG_LVL_1
, "CQ Error %d, reset CID "
1950 sol
->dw
[offsetof(struct amap_sol_cqe
, code
) /
1951 32] & CQE_CODE_MASK
,
1952 (sol
->dw
[offsetof(struct amap_sol_cqe
, cid
) /
1953 32] & CQE_CID_MASK
));
1954 iscsi_conn_failure(beiscsi_conn
->conn
,
1955 ISCSI_ERR_CONN_FAILED
);
1957 case CXN_KILLED_RST_SENT
:
1958 case CXN_KILLED_RST_RCVD
:
1959 SE_DEBUG(DBG_LVL_1
, "CQ Error %d, reset"
1960 "received/sent on CID 0x%x...\n",
1961 sol
->dw
[offsetof(struct amap_sol_cqe
, code
) /
1962 32] & CQE_CODE_MASK
,
1963 (sol
->dw
[offsetof(struct amap_sol_cqe
, cid
) /
1964 32] & CQE_CID_MASK
));
1965 iscsi_conn_failure(beiscsi_conn
->conn
,
1966 ISCSI_ERR_CONN_FAILED
);
1969 SE_DEBUG(DBG_LVL_1
, "CQ Error Invalid code= %d "
1970 "received on CID 0x%x...\n",
1971 sol
->dw
[offsetof(struct amap_sol_cqe
, code
) /
1972 32] & CQE_CODE_MASK
,
1973 (sol
->dw
[offsetof(struct amap_sol_cqe
, cid
) /
1974 32] & CQE_CID_MASK
));
1978 AMAP_SET_BITS(struct amap_sol_cqe
, valid
, sol
, 0);
1980 sol
= queue_tail_node(cq
);
1984 if (num_processed
> 0) {
1985 tot_nump
+= num_processed
;
1986 hwi_ring_cq_db(phba
, cq
->id
, num_processed
, 1, 0);
1991 void beiscsi_process_all_cqs(struct work_struct
*work
)
1993 unsigned long flags
;
1994 struct hwi_controller
*phwi_ctrlr
;
1995 struct hwi_context_memory
*phwi_context
;
1996 struct be_eq_obj
*pbe_eq
;
1997 struct beiscsi_hba
*phba
=
1998 container_of(work
, struct beiscsi_hba
, work_cqs
);
2000 phwi_ctrlr
= phba
->phwi_ctrlr
;
2001 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
2002 if (phba
->msix_enabled
)
2003 pbe_eq
= &phwi_context
->be_eq
[phba
->num_cpus
];
2005 pbe_eq
= &phwi_context
->be_eq
[0];
2007 if (phba
->todo_mcc_cq
) {
2008 spin_lock_irqsave(&phba
->isr_lock
, flags
);
2009 phba
->todo_mcc_cq
= 0;
2010 spin_unlock_irqrestore(&phba
->isr_lock
, flags
);
2011 beiscsi_process_mcc_isr(phba
);
2014 if (phba
->todo_cq
) {
2015 spin_lock_irqsave(&phba
->isr_lock
, flags
);
2017 spin_unlock_irqrestore(&phba
->isr_lock
, flags
);
2018 beiscsi_process_cq(pbe_eq
);
2022 static int be_iopoll(struct blk_iopoll
*iop
, int budget
)
2024 static unsigned int ret
;
2025 struct beiscsi_hba
*phba
;
2026 struct be_eq_obj
*pbe_eq
;
2028 pbe_eq
= container_of(iop
, struct be_eq_obj
, iopoll
);
2029 ret
= beiscsi_process_cq(pbe_eq
);
2031 phba
= pbe_eq
->phba
;
2032 blk_iopoll_complete(iop
);
2033 SE_DEBUG(DBG_LVL_8
, "rearm pbe_eq->q.id =%d\n", pbe_eq
->q
.id
);
2034 hwi_ring_eq_db(phba
, pbe_eq
->q
.id
, 0, 0, 1, 1);
2040 hwi_write_sgl(struct iscsi_wrb
*pwrb
, struct scatterlist
*sg
,
2041 unsigned int num_sg
, struct beiscsi_io_task
*io_task
)
2043 struct iscsi_sge
*psgl
;
2044 unsigned int sg_len
, index
;
2045 unsigned int sge_len
= 0;
2046 unsigned long long addr
;
2047 struct scatterlist
*l_sg
;
2048 unsigned int offset
;
2050 AMAP_SET_BITS(struct amap_iscsi_wrb
, iscsi_bhs_addr_lo
, pwrb
,
2051 io_task
->bhs_pa
.u
.a32
.address_lo
);
2052 AMAP_SET_BITS(struct amap_iscsi_wrb
, iscsi_bhs_addr_hi
, pwrb
,
2053 io_task
->bhs_pa
.u
.a32
.address_hi
);
2056 for (index
= 0; (index
< num_sg
) && (index
< 2); index
++,
2059 sg_len
= sg_dma_len(sg
);
2060 addr
= (u64
) sg_dma_address(sg
);
2061 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_addr_lo
, pwrb
,
2062 ((u32
)(addr
& 0xFFFFFFFF)));
2063 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_addr_hi
, pwrb
,
2064 ((u32
)(addr
>> 32)));
2065 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_len
, pwrb
,
2069 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_r2t_offset
,
2071 sg_len
= sg_dma_len(sg
);
2072 addr
= (u64
) sg_dma_address(sg
);
2073 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_addr_lo
, pwrb
,
2074 ((u32
)(addr
& 0xFFFFFFFF)));
2075 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_addr_hi
, pwrb
,
2076 ((u32
)(addr
>> 32)));
2077 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_len
, pwrb
,
2081 psgl
= (struct iscsi_sge
*)io_task
->psgl_handle
->pfrag
;
2082 memset(psgl
, 0, sizeof(*psgl
) * BE2_SGE
);
2084 AMAP_SET_BITS(struct amap_iscsi_sge
, len
, psgl
, io_task
->bhs_len
- 2);
2086 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, psgl
,
2087 io_task
->bhs_pa
.u
.a32
.address_hi
);
2088 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, psgl
,
2089 io_task
->bhs_pa
.u
.a32
.address_lo
);
2092 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_last
, pwrb
,
2094 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_last
, pwrb
,
2096 } else if (num_sg
== 2) {
2097 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_last
, pwrb
,
2099 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_last
, pwrb
,
2102 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_last
, pwrb
,
2104 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_last
, pwrb
,
2111 for (index
= 0; index
< num_sg
; index
++, sg
= sg_next(sg
), psgl
++) {
2112 sg_len
= sg_dma_len(sg
);
2113 addr
= (u64
) sg_dma_address(sg
);
2114 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, psgl
,
2115 (addr
& 0xFFFFFFFF));
2116 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, psgl
,
2118 AMAP_SET_BITS(struct amap_iscsi_sge
, len
, psgl
, sg_len
);
2119 AMAP_SET_BITS(struct amap_iscsi_sge
, sge_offset
, psgl
, offset
);
2120 AMAP_SET_BITS(struct amap_iscsi_sge
, last_sge
, psgl
, 0);
2124 AMAP_SET_BITS(struct amap_iscsi_sge
, last_sge
, psgl
, 1);
2127 static void hwi_write_buffer(struct iscsi_wrb
*pwrb
, struct iscsi_task
*task
)
2129 struct iscsi_sge
*psgl
;
2130 unsigned long long addr
;
2131 struct beiscsi_io_task
*io_task
= task
->dd_data
;
2132 struct beiscsi_conn
*beiscsi_conn
= io_task
->conn
;
2133 struct beiscsi_hba
*phba
= beiscsi_conn
->phba
;
2135 io_task
->bhs_len
= sizeof(struct be_nonio_bhs
) - 2;
2136 AMAP_SET_BITS(struct amap_iscsi_wrb
, iscsi_bhs_addr_lo
, pwrb
,
2137 io_task
->bhs_pa
.u
.a32
.address_lo
);
2138 AMAP_SET_BITS(struct amap_iscsi_wrb
, iscsi_bhs_addr_hi
, pwrb
,
2139 io_task
->bhs_pa
.u
.a32
.address_hi
);
2142 if (task
->data_count
) {
2143 AMAP_SET_BITS(struct amap_iscsi_wrb
, dsp
, pwrb
, 1);
2144 addr
= (u64
) pci_map_single(phba
->pcidev
,
2146 task
->data_count
, 1);
2148 AMAP_SET_BITS(struct amap_iscsi_wrb
, dsp
, pwrb
, 0);
2151 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_addr_lo
, pwrb
,
2152 ((u32
)(addr
& 0xFFFFFFFF)));
2153 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_addr_hi
, pwrb
,
2154 ((u32
)(addr
>> 32)));
2155 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_len
, pwrb
,
2158 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_last
, pwrb
, 1);
2160 AMAP_SET_BITS(struct amap_iscsi_wrb
, dsp
, pwrb
, 0);
2164 psgl
= (struct iscsi_sge
*)io_task
->psgl_handle
->pfrag
;
2166 AMAP_SET_BITS(struct amap_iscsi_sge
, len
, psgl
, io_task
->bhs_len
);
2168 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, psgl
,
2169 io_task
->bhs_pa
.u
.a32
.address_hi
);
2170 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, psgl
,
2171 io_task
->bhs_pa
.u
.a32
.address_lo
);
2174 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, psgl
, 0);
2175 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, psgl
, 0);
2176 AMAP_SET_BITS(struct amap_iscsi_sge
, len
, psgl
, 0);
2177 AMAP_SET_BITS(struct amap_iscsi_sge
, sge_offset
, psgl
, 0);
2178 AMAP_SET_BITS(struct amap_iscsi_sge
, rsvd0
, psgl
, 0);
2179 AMAP_SET_BITS(struct amap_iscsi_sge
, last_sge
, psgl
, 0);
2183 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, psgl
,
2184 ((u32
)(addr
& 0xFFFFFFFF)));
2185 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, psgl
,
2186 ((u32
)(addr
>> 32)));
2188 AMAP_SET_BITS(struct amap_iscsi_sge
, len
, psgl
, 0x106);
2190 AMAP_SET_BITS(struct amap_iscsi_sge
, last_sge
, psgl
, 1);
2193 static void beiscsi_find_mem_req(struct beiscsi_hba
*phba
)
2195 unsigned int num_cq_pages
, num_async_pdu_buf_pages
;
2196 unsigned int num_async_pdu_data_pages
, wrb_sz_per_cxn
;
2197 unsigned int num_async_pdu_buf_sgl_pages
, num_async_pdu_data_sgl_pages
;
2199 num_cq_pages
= PAGES_REQUIRED(phba
->params
.num_cq_entries
* \
2200 sizeof(struct sol_cqe
));
2201 num_async_pdu_buf_pages
=
2202 PAGES_REQUIRED(phba
->params
.asyncpdus_per_ctrl
* \
2203 phba
->params
.defpdu_hdr_sz
);
2204 num_async_pdu_buf_sgl_pages
=
2205 PAGES_REQUIRED(phba
->params
.asyncpdus_per_ctrl
* \
2206 sizeof(struct phys_addr
));
2207 num_async_pdu_data_pages
=
2208 PAGES_REQUIRED(phba
->params
.asyncpdus_per_ctrl
* \
2209 phba
->params
.defpdu_data_sz
);
2210 num_async_pdu_data_sgl_pages
=
2211 PAGES_REQUIRED(phba
->params
.asyncpdus_per_ctrl
* \
2212 sizeof(struct phys_addr
));
2214 phba
->params
.hwi_ws_sz
= sizeof(struct hwi_controller
);
2216 phba
->mem_req
[ISCSI_MEM_GLOBAL_HEADER
] = 2 *
2217 BE_ISCSI_PDU_HEADER_SIZE
;
2218 phba
->mem_req
[HWI_MEM_ADDN_CONTEXT
] =
2219 sizeof(struct hwi_context_memory
);
2222 phba
->mem_req
[HWI_MEM_WRB
] = sizeof(struct iscsi_wrb
)
2223 * (phba
->params
.wrbs_per_cxn
)
2224 * phba
->params
.cxns_per_ctrl
;
2225 wrb_sz_per_cxn
= sizeof(struct wrb_handle
) *
2226 (phba
->params
.wrbs_per_cxn
);
2227 phba
->mem_req
[HWI_MEM_WRBH
] = roundup_pow_of_two((wrb_sz_per_cxn
) *
2228 phba
->params
.cxns_per_ctrl
);
2230 phba
->mem_req
[HWI_MEM_SGLH
] = sizeof(struct sgl_handle
) *
2231 phba
->params
.icds_per_ctrl
;
2232 phba
->mem_req
[HWI_MEM_SGE
] = sizeof(struct iscsi_sge
) *
2233 phba
->params
.num_sge_per_io
* phba
->params
.icds_per_ctrl
;
2235 phba
->mem_req
[HWI_MEM_ASYNC_HEADER_BUF
] =
2236 num_async_pdu_buf_pages
* PAGE_SIZE
;
2237 phba
->mem_req
[HWI_MEM_ASYNC_DATA_BUF
] =
2238 num_async_pdu_data_pages
* PAGE_SIZE
;
2239 phba
->mem_req
[HWI_MEM_ASYNC_HEADER_RING
] =
2240 num_async_pdu_buf_sgl_pages
* PAGE_SIZE
;
2241 phba
->mem_req
[HWI_MEM_ASYNC_DATA_RING
] =
2242 num_async_pdu_data_sgl_pages
* PAGE_SIZE
;
2243 phba
->mem_req
[HWI_MEM_ASYNC_HEADER_HANDLE
] =
2244 phba
->params
.asyncpdus_per_ctrl
*
2245 sizeof(struct async_pdu_handle
);
2246 phba
->mem_req
[HWI_MEM_ASYNC_DATA_HANDLE
] =
2247 phba
->params
.asyncpdus_per_ctrl
*
2248 sizeof(struct async_pdu_handle
);
2249 phba
->mem_req
[HWI_MEM_ASYNC_PDU_CONTEXT
] =
2250 sizeof(struct hwi_async_pdu_context
) +
2251 (phba
->params
.cxns_per_ctrl
* sizeof(struct hwi_async_entry
));
2254 static int beiscsi_alloc_mem(struct beiscsi_hba
*phba
)
2256 struct be_mem_descriptor
*mem_descr
;
2258 struct mem_array
*mem_arr
, *mem_arr_orig
;
2259 unsigned int i
, j
, alloc_size
, curr_alloc_size
;
2261 phba
->phwi_ctrlr
= kmalloc(phba
->params
.hwi_ws_sz
, GFP_KERNEL
);
2262 if (!phba
->phwi_ctrlr
)
2265 phba
->init_mem
= kcalloc(SE_MEM_MAX
, sizeof(*mem_descr
),
2267 if (!phba
->init_mem
) {
2268 kfree(phba
->phwi_ctrlr
);
2272 mem_arr_orig
= kmalloc(sizeof(*mem_arr_orig
) * BEISCSI_MAX_FRAGS_INIT
,
2274 if (!mem_arr_orig
) {
2275 kfree(phba
->init_mem
);
2276 kfree(phba
->phwi_ctrlr
);
2280 mem_descr
= phba
->init_mem
;
2281 for (i
= 0; i
< SE_MEM_MAX
; i
++) {
2283 mem_arr
= mem_arr_orig
;
2284 alloc_size
= phba
->mem_req
[i
];
2285 memset(mem_arr
, 0, sizeof(struct mem_array
) *
2286 BEISCSI_MAX_FRAGS_INIT
);
2287 curr_alloc_size
= min(be_max_phys_size
* 1024, alloc_size
);
2289 mem_arr
->virtual_address
= pci_alloc_consistent(
2293 if (!mem_arr
->virtual_address
) {
2294 if (curr_alloc_size
<= BE_MIN_MEM_SIZE
)
2296 if (curr_alloc_size
-
2297 rounddown_pow_of_two(curr_alloc_size
))
2298 curr_alloc_size
= rounddown_pow_of_two
2301 curr_alloc_size
= curr_alloc_size
/ 2;
2303 mem_arr
->bus_address
.u
.
2304 a64
.address
= (__u64
) bus_add
;
2305 mem_arr
->size
= curr_alloc_size
;
2306 alloc_size
-= curr_alloc_size
;
2307 curr_alloc_size
= min(be_max_phys_size
*
2312 } while (alloc_size
);
2313 mem_descr
->num_elements
= j
;
2314 mem_descr
->size_in_bytes
= phba
->mem_req
[i
];
2315 mem_descr
->mem_array
= kmalloc(sizeof(*mem_arr
) * j
,
2317 if (!mem_descr
->mem_array
)
2320 memcpy(mem_descr
->mem_array
, mem_arr_orig
,
2321 sizeof(struct mem_array
) * j
);
2324 kfree(mem_arr_orig
);
2327 mem_descr
->num_elements
= j
;
2328 while ((i
) || (j
)) {
2329 for (j
= mem_descr
->num_elements
; j
> 0; j
--) {
2330 pci_free_consistent(phba
->pcidev
,
2331 mem_descr
->mem_array
[j
- 1].size
,
2332 mem_descr
->mem_array
[j
- 1].
2334 (unsigned long)mem_descr
->
2336 bus_address
.u
.a64
.address
);
2340 kfree(mem_descr
->mem_array
);
2344 kfree(mem_arr_orig
);
2345 kfree(phba
->init_mem
);
2346 kfree(phba
->phwi_ctrlr
);
2350 static int beiscsi_get_memory(struct beiscsi_hba
*phba
)
2352 beiscsi_find_mem_req(phba
);
2353 return beiscsi_alloc_mem(phba
);
2356 static void iscsi_init_global_templates(struct beiscsi_hba
*phba
)
2358 struct pdu_data_out
*pdata_out
;
2359 struct pdu_nop_out
*pnop_out
;
2360 struct be_mem_descriptor
*mem_descr
;
2362 mem_descr
= phba
->init_mem
;
2363 mem_descr
+= ISCSI_MEM_GLOBAL_HEADER
;
2365 (struct pdu_data_out
*)mem_descr
->mem_array
[0].virtual_address
;
2366 memset(pdata_out
, 0, BE_ISCSI_PDU_HEADER_SIZE
);
2368 AMAP_SET_BITS(struct amap_pdu_data_out
, opcode
, pdata_out
,
2372 (struct pdu_nop_out
*)((unsigned char *)mem_descr
->mem_array
[0].
2373 virtual_address
+ BE_ISCSI_PDU_HEADER_SIZE
);
2375 memset(pnop_out
, 0, BE_ISCSI_PDU_HEADER_SIZE
);
2376 AMAP_SET_BITS(struct amap_pdu_nop_out
, ttt
, pnop_out
, 0xFFFFFFFF);
2377 AMAP_SET_BITS(struct amap_pdu_nop_out
, f_bit
, pnop_out
, 1);
2378 AMAP_SET_BITS(struct amap_pdu_nop_out
, i_bit
, pnop_out
, 0);
2381 static void beiscsi_init_wrb_handle(struct beiscsi_hba
*phba
)
2383 struct be_mem_descriptor
*mem_descr_wrbh
, *mem_descr_wrb
;
2384 struct wrb_handle
*pwrb_handle
;
2385 struct hwi_controller
*phwi_ctrlr
;
2386 struct hwi_wrb_context
*pwrb_context
;
2387 struct iscsi_wrb
*pwrb
;
2388 unsigned int num_cxn_wrbh
;
2389 unsigned int num_cxn_wrb
, j
, idx
, index
;
2391 mem_descr_wrbh
= phba
->init_mem
;
2392 mem_descr_wrbh
+= HWI_MEM_WRBH
;
2394 mem_descr_wrb
= phba
->init_mem
;
2395 mem_descr_wrb
+= HWI_MEM_WRB
;
2398 pwrb_handle
= mem_descr_wrbh
->mem_array
[idx
].virtual_address
;
2399 num_cxn_wrbh
= ((mem_descr_wrbh
->mem_array
[idx
].size
) /
2400 ((sizeof(struct wrb_handle
)) *
2401 phba
->params
.wrbs_per_cxn
));
2402 phwi_ctrlr
= phba
->phwi_ctrlr
;
2404 for (index
= 0; index
< phba
->params
.cxns_per_ctrl
* 2; index
+= 2) {
2405 pwrb_context
= &phwi_ctrlr
->wrb_context
[index
];
2406 pwrb_context
->pwrb_handle_base
=
2407 kzalloc(sizeof(struct wrb_handle
*) *
2408 phba
->params
.wrbs_per_cxn
, GFP_KERNEL
);
2409 pwrb_context
->pwrb_handle_basestd
=
2410 kzalloc(sizeof(struct wrb_handle
*) *
2411 phba
->params
.wrbs_per_cxn
, GFP_KERNEL
);
2413 pwrb_context
->alloc_index
= 0;
2414 pwrb_context
->wrb_handles_available
= 0;
2415 for (j
= 0; j
< phba
->params
.wrbs_per_cxn
; j
++) {
2416 pwrb_context
->pwrb_handle_base
[j
] = pwrb_handle
;
2417 pwrb_context
->pwrb_handle_basestd
[j
] =
2419 pwrb_context
->wrb_handles_available
++;
2420 pwrb_handle
->wrb_index
= j
;
2423 pwrb_context
->free_index
= 0;
2428 mem_descr_wrbh
->mem_array
[idx
].virtual_address
;
2430 ((mem_descr_wrbh
->mem_array
[idx
].size
) /
2431 ((sizeof(struct wrb_handle
)) *
2432 phba
->params
.wrbs_per_cxn
));
2433 pwrb_context
->alloc_index
= 0;
2434 for (j
= 0; j
< phba
->params
.wrbs_per_cxn
; j
++) {
2435 pwrb_context
->pwrb_handle_base
[j
] = pwrb_handle
;
2436 pwrb_context
->pwrb_handle_basestd
[j
] =
2438 pwrb_context
->wrb_handles_available
++;
2439 pwrb_handle
->wrb_index
= j
;
2442 pwrb_context
->free_index
= 0;
2447 pwrb
= mem_descr_wrb
->mem_array
[idx
].virtual_address
;
2448 num_cxn_wrb
= (mem_descr_wrb
->mem_array
[idx
].size
) /
2449 ((sizeof(struct iscsi_wrb
) *
2450 phba
->params
.wrbs_per_cxn
));
2451 for (index
= 0; index
< phba
->params
.cxns_per_ctrl
* 2; index
+= 2) {
2452 pwrb_context
= &phwi_ctrlr
->wrb_context
[index
];
2454 for (j
= 0; j
< phba
->params
.wrbs_per_cxn
; j
++) {
2455 pwrb_handle
= pwrb_context
->pwrb_handle_base
[j
];
2456 pwrb_handle
->pwrb
= pwrb
;
2462 pwrb
= mem_descr_wrb
->mem_array
[idx
].virtual_address
;
2463 num_cxn_wrb
= (mem_descr_wrb
->mem_array
[idx
].size
) /
2464 ((sizeof(struct iscsi_wrb
) *
2465 phba
->params
.wrbs_per_cxn
));
2466 for (j
= 0; j
< phba
->params
.wrbs_per_cxn
; j
++) {
2467 pwrb_handle
= pwrb_context
->pwrb_handle_base
[j
];
2468 pwrb_handle
->pwrb
= pwrb
;
2476 static void hwi_init_async_pdu_ctx(struct beiscsi_hba
*phba
)
2478 struct hwi_controller
*phwi_ctrlr
;
2479 struct hba_parameters
*p
= &phba
->params
;
2480 struct hwi_async_pdu_context
*pasync_ctx
;
2481 struct async_pdu_handle
*pasync_header_h
, *pasync_data_h
;
2483 struct be_mem_descriptor
*mem_descr
;
2485 mem_descr
= (struct be_mem_descriptor
*)phba
->init_mem
;
2486 mem_descr
+= HWI_MEM_ASYNC_PDU_CONTEXT
;
2488 phwi_ctrlr
= phba
->phwi_ctrlr
;
2489 phwi_ctrlr
->phwi_ctxt
->pasync_ctx
= (struct hwi_async_pdu_context
*)
2490 mem_descr
->mem_array
[0].virtual_address
;
2491 pasync_ctx
= phwi_ctrlr
->phwi_ctxt
->pasync_ctx
;
2492 memset(pasync_ctx
, 0, sizeof(*pasync_ctx
));
2494 pasync_ctx
->async_header
.num_entries
= p
->asyncpdus_per_ctrl
;
2495 pasync_ctx
->async_header
.buffer_size
= p
->defpdu_hdr_sz
;
2496 pasync_ctx
->async_data
.buffer_size
= p
->defpdu_data_sz
;
2497 pasync_ctx
->async_data
.num_entries
= p
->asyncpdus_per_ctrl
;
2499 mem_descr
= (struct be_mem_descriptor
*)phba
->init_mem
;
2500 mem_descr
+= HWI_MEM_ASYNC_HEADER_BUF
;
2501 if (mem_descr
->mem_array
[0].virtual_address
) {
2503 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
2504 "va=%p\n", mem_descr
->mem_array
[0].virtual_address
);
2506 shost_printk(KERN_WARNING
, phba
->shost
,
2507 "No Virtual address\n");
2509 pasync_ctx
->async_header
.va_base
=
2510 mem_descr
->mem_array
[0].virtual_address
;
2512 pasync_ctx
->async_header
.pa_base
.u
.a64
.address
=
2513 mem_descr
->mem_array
[0].bus_address
.u
.a64
.address
;
2515 mem_descr
= (struct be_mem_descriptor
*)phba
->init_mem
;
2516 mem_descr
+= HWI_MEM_ASYNC_HEADER_RING
;
2517 if (mem_descr
->mem_array
[0].virtual_address
) {
2519 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
2520 "va=%p\n", mem_descr
->mem_array
[0].virtual_address
);
2522 shost_printk(KERN_WARNING
, phba
->shost
,
2523 "No Virtual address\n");
2524 pasync_ctx
->async_header
.ring_base
=
2525 mem_descr
->mem_array
[0].virtual_address
;
2527 mem_descr
= (struct be_mem_descriptor
*)phba
->init_mem
;
2528 mem_descr
+= HWI_MEM_ASYNC_HEADER_HANDLE
;
2529 if (mem_descr
->mem_array
[0].virtual_address
) {
2531 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
2532 "va=%p\n", mem_descr
->mem_array
[0].virtual_address
);
2534 shost_printk(KERN_WARNING
, phba
->shost
,
2535 "No Virtual address\n");
2537 pasync_ctx
->async_header
.handle_base
=
2538 mem_descr
->mem_array
[0].virtual_address
;
2539 pasync_ctx
->async_header
.writables
= 0;
2540 INIT_LIST_HEAD(&pasync_ctx
->async_header
.free_list
);
2542 mem_descr
= (struct be_mem_descriptor
*)phba
->init_mem
;
2543 mem_descr
+= HWI_MEM_ASYNC_DATA_BUF
;
2544 if (mem_descr
->mem_array
[0].virtual_address
) {
2546 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
2547 "va=%p\n", mem_descr
->mem_array
[0].virtual_address
);
2549 shost_printk(KERN_WARNING
, phba
->shost
,
2550 "No Virtual address\n");
2551 pasync_ctx
->async_data
.va_base
=
2552 mem_descr
->mem_array
[0].virtual_address
;
2553 pasync_ctx
->async_data
.pa_base
.u
.a64
.address
=
2554 mem_descr
->mem_array
[0].bus_address
.u
.a64
.address
;
2556 mem_descr
= (struct be_mem_descriptor
*)phba
->init_mem
;
2557 mem_descr
+= HWI_MEM_ASYNC_DATA_RING
;
2558 if (mem_descr
->mem_array
[0].virtual_address
) {
2560 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
2561 "va=%p\n", mem_descr
->mem_array
[0].virtual_address
);
2563 shost_printk(KERN_WARNING
, phba
->shost
,
2564 "No Virtual address\n");
2566 pasync_ctx
->async_data
.ring_base
=
2567 mem_descr
->mem_array
[0].virtual_address
;
2569 mem_descr
= (struct be_mem_descriptor
*)phba
->init_mem
;
2570 mem_descr
+= HWI_MEM_ASYNC_DATA_HANDLE
;
2571 if (!mem_descr
->mem_array
[0].virtual_address
)
2572 shost_printk(KERN_WARNING
, phba
->shost
,
2573 "No Virtual address\n");
2575 pasync_ctx
->async_data
.handle_base
=
2576 mem_descr
->mem_array
[0].virtual_address
;
2577 pasync_ctx
->async_data
.writables
= 0;
2578 INIT_LIST_HEAD(&pasync_ctx
->async_data
.free_list
);
2581 (struct async_pdu_handle
*)pasync_ctx
->async_header
.handle_base
;
2583 (struct async_pdu_handle
*)pasync_ctx
->async_data
.handle_base
;
2585 for (index
= 0; index
< p
->asyncpdus_per_ctrl
; index
++) {
2586 pasync_header_h
->cri
= -1;
2587 pasync_header_h
->index
= (char)index
;
2588 INIT_LIST_HEAD(&pasync_header_h
->link
);
2589 pasync_header_h
->pbuffer
=
2590 (void *)((unsigned long)
2591 (pasync_ctx
->async_header
.va_base
) +
2592 (p
->defpdu_hdr_sz
* index
));
2594 pasync_header_h
->pa
.u
.a64
.address
=
2595 pasync_ctx
->async_header
.pa_base
.u
.a64
.address
+
2596 (p
->defpdu_hdr_sz
* index
);
2598 list_add_tail(&pasync_header_h
->link
,
2599 &pasync_ctx
->async_header
.free_list
);
2601 pasync_ctx
->async_header
.free_entries
++;
2602 pasync_ctx
->async_header
.writables
++;
2604 INIT_LIST_HEAD(&pasync_ctx
->async_entry
[index
].wait_queue
.list
);
2605 INIT_LIST_HEAD(&pasync_ctx
->async_entry
[index
].
2607 pasync_data_h
->cri
= -1;
2608 pasync_data_h
->index
= (char)index
;
2609 INIT_LIST_HEAD(&pasync_data_h
->link
);
2610 pasync_data_h
->pbuffer
=
2611 (void *)((unsigned long)
2612 (pasync_ctx
->async_data
.va_base
) +
2613 (p
->defpdu_data_sz
* index
));
2615 pasync_data_h
->pa
.u
.a64
.address
=
2616 pasync_ctx
->async_data
.pa_base
.u
.a64
.address
+
2617 (p
->defpdu_data_sz
* index
);
2619 list_add_tail(&pasync_data_h
->link
,
2620 &pasync_ctx
->async_data
.free_list
);
2622 pasync_ctx
->async_data
.free_entries
++;
2623 pasync_ctx
->async_data
.writables
++;
2625 INIT_LIST_HEAD(&pasync_ctx
->async_entry
[index
].data_busy_list
);
2628 pasync_ctx
->async_header
.host_write_ptr
= 0;
2629 pasync_ctx
->async_header
.ep_read_ptr
= -1;
2630 pasync_ctx
->async_data
.host_write_ptr
= 0;
2631 pasync_ctx
->async_data
.ep_read_ptr
= -1;
2635 be_sgl_create_contiguous(void *virtual_address
,
2636 u64 physical_address
, u32 length
,
2637 struct be_dma_mem
*sgl
)
2639 WARN_ON(!virtual_address
);
2640 WARN_ON(!physical_address
);
2641 WARN_ON(!length
> 0);
2644 sgl
->va
= virtual_address
;
2645 sgl
->dma
= (unsigned long)physical_address
;
2651 static void be_sgl_destroy_contiguous(struct be_dma_mem
*sgl
)
2653 memset(sgl
, 0, sizeof(*sgl
));
2657 hwi_build_be_sgl_arr(struct beiscsi_hba
*phba
,
2658 struct mem_array
*pmem
, struct be_dma_mem
*sgl
)
2661 be_sgl_destroy_contiguous(sgl
);
2663 be_sgl_create_contiguous(pmem
->virtual_address
,
2664 pmem
->bus_address
.u
.a64
.address
,
2669 hwi_build_be_sgl_by_offset(struct beiscsi_hba
*phba
,
2670 struct mem_array
*pmem
, struct be_dma_mem
*sgl
)
2673 be_sgl_destroy_contiguous(sgl
);
2675 be_sgl_create_contiguous((unsigned char *)pmem
->virtual_address
,
2676 pmem
->bus_address
.u
.a64
.address
,
2680 static int be_fill_queue(struct be_queue_info
*q
,
2681 u16 len
, u16 entry_size
, void *vaddress
)
2683 struct be_dma_mem
*mem
= &q
->dma_mem
;
2685 memset(q
, 0, sizeof(*q
));
2687 q
->entry_size
= entry_size
;
2688 mem
->size
= len
* entry_size
;
2692 memset(mem
->va
, 0, mem
->size
);
2696 static int beiscsi_create_eqs(struct beiscsi_hba
*phba
,
2697 struct hwi_context_memory
*phwi_context
)
2699 unsigned int i
, num_eq_pages
;
2700 int ret
, eq_for_mcc
;
2701 struct be_queue_info
*eq
;
2702 struct be_dma_mem
*mem
;
2706 num_eq_pages
= PAGES_REQUIRED(phba
->params
.num_eq_entries
* \
2707 sizeof(struct be_eq_entry
));
2709 if (phba
->msix_enabled
)
2713 for (i
= 0; i
< (phba
->num_cpus
+ eq_for_mcc
); i
++) {
2714 eq
= &phwi_context
->be_eq
[i
].q
;
2716 phwi_context
->be_eq
[i
].phba
= phba
;
2717 eq_vaddress
= pci_alloc_consistent(phba
->pcidev
,
2718 num_eq_pages
* PAGE_SIZE
,
2721 goto create_eq_error
;
2723 mem
->va
= eq_vaddress
;
2724 ret
= be_fill_queue(eq
, phba
->params
.num_eq_entries
,
2725 sizeof(struct be_eq_entry
), eq_vaddress
);
2727 shost_printk(KERN_ERR
, phba
->shost
,
2728 "be_fill_queue Failed for EQ\n");
2729 goto create_eq_error
;
2733 ret
= beiscsi_cmd_eq_create(&phba
->ctrl
, eq
,
2734 phwi_context
->cur_eqd
);
2736 shost_printk(KERN_ERR
, phba
->shost
,
2737 "beiscsi_cmd_eq_create"
2739 goto create_eq_error
;
2741 SE_DEBUG(DBG_LVL_8
, "eqid = %d\n", phwi_context
->be_eq
[i
].q
.id
);
2745 for (i
= 0; i
< (phba
->num_cpus
+ 1); i
++) {
2746 eq
= &phwi_context
->be_eq
[i
].q
;
2749 pci_free_consistent(phba
->pcidev
, num_eq_pages
2756 static int beiscsi_create_cqs(struct beiscsi_hba
*phba
,
2757 struct hwi_context_memory
*phwi_context
)
2759 unsigned int i
, num_cq_pages
;
2761 struct be_queue_info
*cq
, *eq
;
2762 struct be_dma_mem
*mem
;
2763 struct be_eq_obj
*pbe_eq
;
2767 num_cq_pages
= PAGES_REQUIRED(phba
->params
.num_cq_entries
* \
2768 sizeof(struct sol_cqe
));
2770 for (i
= 0; i
< phba
->num_cpus
; i
++) {
2771 cq
= &phwi_context
->be_cq
[i
];
2772 eq
= &phwi_context
->be_eq
[i
].q
;
2773 pbe_eq
= &phwi_context
->be_eq
[i
];
2775 pbe_eq
->phba
= phba
;
2777 cq_vaddress
= pci_alloc_consistent(phba
->pcidev
,
2778 num_cq_pages
* PAGE_SIZE
,
2781 goto create_cq_error
;
2782 ret
= be_fill_queue(cq
, phba
->params
.num_cq_entries
,
2783 sizeof(struct sol_cqe
), cq_vaddress
);
2785 shost_printk(KERN_ERR
, phba
->shost
,
2786 "be_fill_queue Failed for ISCSI CQ\n");
2787 goto create_cq_error
;
2791 ret
= beiscsi_cmd_cq_create(&phba
->ctrl
, cq
, eq
, false,
2794 shost_printk(KERN_ERR
, phba
->shost
,
2795 "beiscsi_cmd_eq_create"
2796 "Failed for ISCSI CQ\n");
2797 goto create_cq_error
;
2799 SE_DEBUG(DBG_LVL_8
, "iscsi cq_id is %d for eq_id %d\n",
2801 SE_DEBUG(DBG_LVL_8
, "ISCSI CQ CREATED\n");
2806 for (i
= 0; i
< phba
->num_cpus
; i
++) {
2807 cq
= &phwi_context
->be_cq
[i
];
2810 pci_free_consistent(phba
->pcidev
, num_cq_pages
2819 beiscsi_create_def_hdr(struct beiscsi_hba
*phba
,
2820 struct hwi_context_memory
*phwi_context
,
2821 struct hwi_controller
*phwi_ctrlr
,
2822 unsigned int def_pdu_ring_sz
)
2826 struct be_queue_info
*dq
, *cq
;
2827 struct be_dma_mem
*mem
;
2828 struct be_mem_descriptor
*mem_descr
;
2832 dq
= &phwi_context
->be_def_hdrq
;
2833 cq
= &phwi_context
->be_cq
[0];
2835 mem_descr
= phba
->init_mem
;
2836 mem_descr
+= HWI_MEM_ASYNC_HEADER_RING
;
2837 dq_vaddress
= mem_descr
->mem_array
[idx
].virtual_address
;
2838 ret
= be_fill_queue(dq
, mem_descr
->mem_array
[0].size
/
2839 sizeof(struct phys_addr
),
2840 sizeof(struct phys_addr
), dq_vaddress
);
2842 shost_printk(KERN_ERR
, phba
->shost
,
2843 "be_fill_queue Failed for DEF PDU HDR\n");
2846 mem
->dma
= (unsigned long)mem_descr
->mem_array
[idx
].
2847 bus_address
.u
.a64
.address
;
2848 ret
= be_cmd_create_default_pdu_queue(&phba
->ctrl
, cq
, dq
,
2850 phba
->params
.defpdu_hdr_sz
);
2852 shost_printk(KERN_ERR
, phba
->shost
,
2853 "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2856 phwi_ctrlr
->default_pdu_hdr
.id
= phwi_context
->be_def_hdrq
.id
;
2857 SE_DEBUG(DBG_LVL_8
, "iscsi def pdu id is %d\n",
2858 phwi_context
->be_def_hdrq
.id
);
2859 hwi_post_async_buffers(phba
, 1);
2864 beiscsi_create_def_data(struct beiscsi_hba
*phba
,
2865 struct hwi_context_memory
*phwi_context
,
2866 struct hwi_controller
*phwi_ctrlr
,
2867 unsigned int def_pdu_ring_sz
)
2871 struct be_queue_info
*dataq
, *cq
;
2872 struct be_dma_mem
*mem
;
2873 struct be_mem_descriptor
*mem_descr
;
2877 dataq
= &phwi_context
->be_def_dataq
;
2878 cq
= &phwi_context
->be_cq
[0];
2879 mem
= &dataq
->dma_mem
;
2880 mem_descr
= phba
->init_mem
;
2881 mem_descr
+= HWI_MEM_ASYNC_DATA_RING
;
2882 dq_vaddress
= mem_descr
->mem_array
[idx
].virtual_address
;
2883 ret
= be_fill_queue(dataq
, mem_descr
->mem_array
[0].size
/
2884 sizeof(struct phys_addr
),
2885 sizeof(struct phys_addr
), dq_vaddress
);
2887 shost_printk(KERN_ERR
, phba
->shost
,
2888 "be_fill_queue Failed for DEF PDU DATA\n");
2891 mem
->dma
= (unsigned long)mem_descr
->mem_array
[idx
].
2892 bus_address
.u
.a64
.address
;
2893 ret
= be_cmd_create_default_pdu_queue(&phba
->ctrl
, cq
, dataq
,
2895 phba
->params
.defpdu_data_sz
);
2897 shost_printk(KERN_ERR
, phba
->shost
,
2898 "be_cmd_create_default_pdu_queue Failed"
2899 " for DEF PDU DATA\n");
2902 phwi_ctrlr
->default_pdu_data
.id
= phwi_context
->be_def_dataq
.id
;
2903 SE_DEBUG(DBG_LVL_8
, "iscsi def data id is %d\n",
2904 phwi_context
->be_def_dataq
.id
);
2905 hwi_post_async_buffers(phba
, 0);
2906 SE_DEBUG(DBG_LVL_8
, "DEFAULT PDU DATA RING CREATED\n");
2911 beiscsi_post_pages(struct beiscsi_hba
*phba
)
2913 struct be_mem_descriptor
*mem_descr
;
2914 struct mem_array
*pm_arr
;
2915 unsigned int page_offset
, i
;
2916 struct be_dma_mem sgl
;
2919 mem_descr
= phba
->init_mem
;
2920 mem_descr
+= HWI_MEM_SGE
;
2921 pm_arr
= mem_descr
->mem_array
;
2923 page_offset
= (sizeof(struct iscsi_sge
) * phba
->params
.num_sge_per_io
*
2924 phba
->fw_config
.iscsi_icd_start
) / PAGE_SIZE
;
2925 for (i
= 0; i
< mem_descr
->num_elements
; i
++) {
2926 hwi_build_be_sgl_arr(phba
, pm_arr
, &sgl
);
2927 status
= be_cmd_iscsi_post_sgl_pages(&phba
->ctrl
, &sgl
,
2929 (pm_arr
->size
/ PAGE_SIZE
));
2930 page_offset
+= pm_arr
->size
/ PAGE_SIZE
;
2932 shost_printk(KERN_ERR
, phba
->shost
,
2933 "post sgl failed.\n");
2938 SE_DEBUG(DBG_LVL_8
, "POSTED PAGES\n");
2942 static void be_queue_free(struct beiscsi_hba
*phba
, struct be_queue_info
*q
)
2944 struct be_dma_mem
*mem
= &q
->dma_mem
;
2946 pci_free_consistent(phba
->pcidev
, mem
->size
,
2950 static int be_queue_alloc(struct beiscsi_hba
*phba
, struct be_queue_info
*q
,
2951 u16 len
, u16 entry_size
)
2953 struct be_dma_mem
*mem
= &q
->dma_mem
;
2955 memset(q
, 0, sizeof(*q
));
2957 q
->entry_size
= entry_size
;
2958 mem
->size
= len
* entry_size
;
2959 mem
->va
= pci_alloc_consistent(phba
->pcidev
, mem
->size
, &mem
->dma
);
2962 memset(mem
->va
, 0, mem
->size
);
2967 beiscsi_create_wrb_rings(struct beiscsi_hba
*phba
,
2968 struct hwi_context_memory
*phwi_context
,
2969 struct hwi_controller
*phwi_ctrlr
)
2971 unsigned int wrb_mem_index
, offset
, size
, num_wrb_rings
;
2973 unsigned int idx
, num
, i
;
2974 struct mem_array
*pwrb_arr
;
2976 struct be_dma_mem sgl
;
2977 struct be_mem_descriptor
*mem_descr
;
2981 mem_descr
= phba
->init_mem
;
2982 mem_descr
+= HWI_MEM_WRB
;
2983 pwrb_arr
= kmalloc(sizeof(*pwrb_arr
) * phba
->params
.cxns_per_ctrl
,
2986 shost_printk(KERN_ERR
, phba
->shost
,
2987 "Memory alloc failed in create wrb ring.\n");
2990 wrb_vaddr
= mem_descr
->mem_array
[idx
].virtual_address
;
2991 pa_addr_lo
= mem_descr
->mem_array
[idx
].bus_address
.u
.a64
.address
;
2992 num_wrb_rings
= mem_descr
->mem_array
[idx
].size
/
2993 (phba
->params
.wrbs_per_cxn
* sizeof(struct iscsi_wrb
));
2995 for (num
= 0; num
< phba
->params
.cxns_per_ctrl
; num
++) {
2996 if (num_wrb_rings
) {
2997 pwrb_arr
[num
].virtual_address
= wrb_vaddr
;
2998 pwrb_arr
[num
].bus_address
.u
.a64
.address
= pa_addr_lo
;
2999 pwrb_arr
[num
].size
= phba
->params
.wrbs_per_cxn
*
3000 sizeof(struct iscsi_wrb
);
3001 wrb_vaddr
+= pwrb_arr
[num
].size
;
3002 pa_addr_lo
+= pwrb_arr
[num
].size
;
3006 wrb_vaddr
= mem_descr
->mem_array
[idx
].virtual_address
;
3007 pa_addr_lo
= mem_descr
->mem_array
[idx
].\
3008 bus_address
.u
.a64
.address
;
3009 num_wrb_rings
= mem_descr
->mem_array
[idx
].size
/
3010 (phba
->params
.wrbs_per_cxn
*
3011 sizeof(struct iscsi_wrb
));
3012 pwrb_arr
[num
].virtual_address
= wrb_vaddr
;
3013 pwrb_arr
[num
].bus_address
.u
.a64
.address\
3015 pwrb_arr
[num
].size
= phba
->params
.wrbs_per_cxn
*
3016 sizeof(struct iscsi_wrb
);
3017 wrb_vaddr
+= pwrb_arr
[num
].size
;
3018 pa_addr_lo
+= pwrb_arr
[num
].size
;
3022 for (i
= 0; i
< phba
->params
.cxns_per_ctrl
; i
++) {
3027 hwi_build_be_sgl_by_offset(phba
, &pwrb_arr
[i
], &sgl
);
3028 status
= be_cmd_wrbq_create(&phba
->ctrl
, &sgl
,
3029 &phwi_context
->be_wrbq
[i
]);
3031 shost_printk(KERN_ERR
, phba
->shost
,
3032 "wrbq create failed.");
3036 phwi_ctrlr
->wrb_context
[i
* 2].cid
= phwi_context
->be_wrbq
[i
].
3043 static void free_wrb_handles(struct beiscsi_hba
*phba
)
3046 struct hwi_controller
*phwi_ctrlr
;
3047 struct hwi_wrb_context
*pwrb_context
;
3049 phwi_ctrlr
= phba
->phwi_ctrlr
;
3050 for (index
= 0; index
< phba
->params
.cxns_per_ctrl
* 2; index
+= 2) {
3051 pwrb_context
= &phwi_ctrlr
->wrb_context
[index
];
3052 kfree(pwrb_context
->pwrb_handle_base
);
3053 kfree(pwrb_context
->pwrb_handle_basestd
);
3057 static void be_mcc_queues_destroy(struct beiscsi_hba
*phba
)
3059 struct be_queue_info
*q
;
3060 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
3062 q
= &phba
->ctrl
.mcc_obj
.q
;
3064 beiscsi_cmd_q_destroy(ctrl
, q
, QTYPE_MCCQ
);
3065 be_queue_free(phba
, q
);
3067 q
= &phba
->ctrl
.mcc_obj
.cq
;
3069 beiscsi_cmd_q_destroy(ctrl
, q
, QTYPE_CQ
);
3070 be_queue_free(phba
, q
);
3073 static void hwi_cleanup(struct beiscsi_hba
*phba
)
3075 struct be_queue_info
*q
;
3076 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
3077 struct hwi_controller
*phwi_ctrlr
;
3078 struct hwi_context_memory
*phwi_context
;
3081 phwi_ctrlr
= phba
->phwi_ctrlr
;
3082 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
3083 for (i
= 0; i
< phba
->params
.cxns_per_ctrl
; i
++) {
3084 q
= &phwi_context
->be_wrbq
[i
];
3086 beiscsi_cmd_q_destroy(ctrl
, q
, QTYPE_WRBQ
);
3088 free_wrb_handles(phba
);
3090 q
= &phwi_context
->be_def_hdrq
;
3092 beiscsi_cmd_q_destroy(ctrl
, q
, QTYPE_DPDUQ
);
3094 q
= &phwi_context
->be_def_dataq
;
3096 beiscsi_cmd_q_destroy(ctrl
, q
, QTYPE_DPDUQ
);
3098 beiscsi_cmd_q_destroy(ctrl
, NULL
, QTYPE_SGL
);
3100 for (i
= 0; i
< (phba
->num_cpus
); i
++) {
3101 q
= &phwi_context
->be_cq
[i
];
3103 beiscsi_cmd_q_destroy(ctrl
, q
, QTYPE_CQ
);
3105 if (phba
->msix_enabled
)
3109 for (i
= 0; i
< (phba
->num_cpus
+ eq_num
); i
++) {
3110 q
= &phwi_context
->be_eq
[i
].q
;
3112 beiscsi_cmd_q_destroy(ctrl
, q
, QTYPE_EQ
);
3114 be_mcc_queues_destroy(phba
);
3117 static int be_mcc_queues_create(struct beiscsi_hba
*phba
,
3118 struct hwi_context_memory
*phwi_context
)
3120 struct be_queue_info
*q
, *cq
;
3121 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
3123 /* Alloc MCC compl queue */
3124 cq
= &phba
->ctrl
.mcc_obj
.cq
;
3125 if (be_queue_alloc(phba
, cq
, MCC_CQ_LEN
,
3126 sizeof(struct be_mcc_compl
)))
3128 /* Ask BE to create MCC compl queue; */
3129 if (phba
->msix_enabled
) {
3130 if (beiscsi_cmd_cq_create(ctrl
, cq
, &phwi_context
->be_eq
3131 [phba
->num_cpus
].q
, false, true, 0))
3134 if (beiscsi_cmd_cq_create(ctrl
, cq
, &phwi_context
->be_eq
[0].q
,
3139 /* Alloc MCC queue */
3140 q
= &phba
->ctrl
.mcc_obj
.q
;
3141 if (be_queue_alloc(phba
, q
, MCC_Q_LEN
, sizeof(struct be_mcc_wrb
)))
3142 goto mcc_cq_destroy
;
3144 /* Ask BE to create MCC queue */
3145 if (beiscsi_cmd_mccq_create(phba
, q
, cq
))
3151 be_queue_free(phba
, q
);
3153 beiscsi_cmd_q_destroy(ctrl
, cq
, QTYPE_CQ
);
3155 be_queue_free(phba
, cq
);
3160 static int find_num_cpus(void)
3164 num_cpus
= num_online_cpus();
3165 if (num_cpus
>= MAX_CPUS
)
3166 num_cpus
= MAX_CPUS
- 1;
3168 SE_DEBUG(DBG_LVL_8
, "num_cpus = %d\n", num_cpus
);
3172 static int hwi_init_port(struct beiscsi_hba
*phba
)
3174 struct hwi_controller
*phwi_ctrlr
;
3175 struct hwi_context_memory
*phwi_context
;
3176 unsigned int def_pdu_ring_sz
;
3177 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
3181 phba
->params
.asyncpdus_per_ctrl
* sizeof(struct phys_addr
);
3182 phwi_ctrlr
= phba
->phwi_ctrlr
;
3183 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
3184 phwi_context
->max_eqd
= 0;
3185 phwi_context
->min_eqd
= 0;
3186 phwi_context
->cur_eqd
= 64;
3187 be_cmd_fw_initialize(&phba
->ctrl
);
3189 status
= beiscsi_create_eqs(phba
, phwi_context
);
3191 shost_printk(KERN_ERR
, phba
->shost
, "EQ not created\n");
3195 status
= be_mcc_queues_create(phba
, phwi_context
);
3199 status
= mgmt_check_supported_fw(ctrl
, phba
);
3201 shost_printk(KERN_ERR
, phba
->shost
,
3202 "Unsupported fw version\n");
3206 status
= beiscsi_create_cqs(phba
, phwi_context
);
3208 shost_printk(KERN_ERR
, phba
->shost
, "CQ not created\n");
3212 status
= beiscsi_create_def_hdr(phba
, phwi_context
, phwi_ctrlr
,
3215 shost_printk(KERN_ERR
, phba
->shost
,
3216 "Default Header not created\n");
3220 status
= beiscsi_create_def_data(phba
, phwi_context
,
3221 phwi_ctrlr
, def_pdu_ring_sz
);
3223 shost_printk(KERN_ERR
, phba
->shost
,
3224 "Default Data not created\n");
3228 status
= beiscsi_post_pages(phba
);
3230 shost_printk(KERN_ERR
, phba
->shost
, "Post SGL Pages Failed\n");
3234 status
= beiscsi_create_wrb_rings(phba
, phwi_context
, phwi_ctrlr
);
3236 shost_printk(KERN_ERR
, phba
->shost
,
3237 "WRB Rings not created\n");
3241 SE_DEBUG(DBG_LVL_8
, "hwi_init_port success\n");
3245 shost_printk(KERN_ERR
, phba
->shost
, "hwi_init_port failed");
3250 static int hwi_init_controller(struct beiscsi_hba
*phba
)
3252 struct hwi_controller
*phwi_ctrlr
;
3254 phwi_ctrlr
= phba
->phwi_ctrlr
;
3255 if (1 == phba
->init_mem
[HWI_MEM_ADDN_CONTEXT
].num_elements
) {
3256 phwi_ctrlr
->phwi_ctxt
= (struct hwi_context_memory
*)phba
->
3257 init_mem
[HWI_MEM_ADDN_CONTEXT
].mem_array
[0].virtual_address
;
3258 SE_DEBUG(DBG_LVL_8
, " phwi_ctrlr->phwi_ctxt=%p\n",
3259 phwi_ctrlr
->phwi_ctxt
);
3261 shost_printk(KERN_ERR
, phba
->shost
,
3262 "HWI_MEM_ADDN_CONTEXT is more than one element."
3263 "Failing to load\n");
3267 iscsi_init_global_templates(phba
);
3268 beiscsi_init_wrb_handle(phba
);
3269 hwi_init_async_pdu_ctx(phba
);
3270 if (hwi_init_port(phba
) != 0) {
3271 shost_printk(KERN_ERR
, phba
->shost
,
3272 "hwi_init_controller failed\n");
3278 static void beiscsi_free_mem(struct beiscsi_hba
*phba
)
3280 struct be_mem_descriptor
*mem_descr
;
3283 mem_descr
= phba
->init_mem
;
3286 for (i
= 0; i
< SE_MEM_MAX
; i
++) {
3287 for (j
= mem_descr
->num_elements
; j
> 0; j
--) {
3288 pci_free_consistent(phba
->pcidev
,
3289 mem_descr
->mem_array
[j
- 1].size
,
3290 mem_descr
->mem_array
[j
- 1].virtual_address
,
3291 (unsigned long)mem_descr
->mem_array
[j
- 1].
3292 bus_address
.u
.a64
.address
);
3294 kfree(mem_descr
->mem_array
);
3297 kfree(phba
->init_mem
);
3298 kfree(phba
->phwi_ctrlr
);
3301 static int beiscsi_init_controller(struct beiscsi_hba
*phba
)
3305 ret
= beiscsi_get_memory(phba
);
3307 shost_printk(KERN_ERR
, phba
->shost
, "beiscsi_dev_probe -"
3308 "Failed in beiscsi_alloc_memory\n");
3312 ret
= hwi_init_controller(phba
);
3315 SE_DEBUG(DBG_LVL_8
, "Return success from beiscsi_init_controller");
3319 beiscsi_free_mem(phba
);
3323 static int beiscsi_init_sgl_handle(struct beiscsi_hba
*phba
)
3325 struct be_mem_descriptor
*mem_descr_sglh
, *mem_descr_sg
;
3326 struct sgl_handle
*psgl_handle
;
3327 struct iscsi_sge
*pfrag
;
3328 unsigned int arr_index
, i
, idx
;
3330 phba
->io_sgl_hndl_avbl
= 0;
3331 phba
->eh_sgl_hndl_avbl
= 0;
3333 mem_descr_sglh
= phba
->init_mem
;
3334 mem_descr_sglh
+= HWI_MEM_SGLH
;
3335 if (1 == mem_descr_sglh
->num_elements
) {
3336 phba
->io_sgl_hndl_base
= kzalloc(sizeof(struct sgl_handle
*) *
3337 phba
->params
.ios_per_ctrl
,
3339 if (!phba
->io_sgl_hndl_base
) {
3340 shost_printk(KERN_ERR
, phba
->shost
,
3341 "Mem Alloc Failed. Failing to load\n");
3344 phba
->eh_sgl_hndl_base
= kzalloc(sizeof(struct sgl_handle
*) *
3345 (phba
->params
.icds_per_ctrl
-
3346 phba
->params
.ios_per_ctrl
),
3348 if (!phba
->eh_sgl_hndl_base
) {
3349 kfree(phba
->io_sgl_hndl_base
);
3350 shost_printk(KERN_ERR
, phba
->shost
,
3351 "Mem Alloc Failed. Failing to load\n");
3355 shost_printk(KERN_ERR
, phba
->shost
,
3356 "HWI_MEM_SGLH is more than one element."
3357 "Failing to load\n");
3363 while (idx
< mem_descr_sglh
->num_elements
) {
3364 psgl_handle
= mem_descr_sglh
->mem_array
[idx
].virtual_address
;
3366 for (i
= 0; i
< (mem_descr_sglh
->mem_array
[idx
].size
/
3367 sizeof(struct sgl_handle
)); i
++) {
3368 if (arr_index
< phba
->params
.ios_per_ctrl
) {
3369 phba
->io_sgl_hndl_base
[arr_index
] = psgl_handle
;
3370 phba
->io_sgl_hndl_avbl
++;
3373 phba
->eh_sgl_hndl_base
[arr_index
-
3374 phba
->params
.ios_per_ctrl
] =
3377 phba
->eh_sgl_hndl_avbl
++;
3384 "phba->io_sgl_hndl_avbl=%d"
3385 "phba->eh_sgl_hndl_avbl=%d\n",
3386 phba
->io_sgl_hndl_avbl
,
3387 phba
->eh_sgl_hndl_avbl
);
3388 mem_descr_sg
= phba
->init_mem
;
3389 mem_descr_sg
+= HWI_MEM_SGE
;
3390 SE_DEBUG(DBG_LVL_8
, "\n mem_descr_sg->num_elements=%d\n",
3391 mem_descr_sg
->num_elements
);
3394 while (idx
< mem_descr_sg
->num_elements
) {
3395 pfrag
= mem_descr_sg
->mem_array
[idx
].virtual_address
;
3398 i
< (mem_descr_sg
->mem_array
[idx
].size
) /
3399 (sizeof(struct iscsi_sge
) * phba
->params
.num_sge_per_io
);
3401 if (arr_index
< phba
->params
.ios_per_ctrl
)
3402 psgl_handle
= phba
->io_sgl_hndl_base
[arr_index
];
3404 psgl_handle
= phba
->eh_sgl_hndl_base
[arr_index
-
3405 phba
->params
.ios_per_ctrl
];
3406 psgl_handle
->pfrag
= pfrag
;
3407 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, pfrag
, 0);
3408 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, pfrag
, 0);
3409 pfrag
+= phba
->params
.num_sge_per_io
;
3410 psgl_handle
->sgl_index
=
3411 phba
->fw_config
.iscsi_icd_start
+ arr_index
++;
3415 phba
->io_sgl_free_index
= 0;
3416 phba
->io_sgl_alloc_index
= 0;
3417 phba
->eh_sgl_free_index
= 0;
3418 phba
->eh_sgl_alloc_index
= 0;
3422 static int hba_setup_cid_tbls(struct beiscsi_hba
*phba
)
3426 phba
->cid_array
= kzalloc(sizeof(void *) * phba
->params
.cxns_per_ctrl
,
3428 if (!phba
->cid_array
) {
3429 shost_printk(KERN_ERR
, phba
->shost
,
3430 "Failed to allocate memory in "
3431 "hba_setup_cid_tbls\n");
3434 phba
->ep_array
= kzalloc(sizeof(struct iscsi_endpoint
*) *
3435 phba
->params
.cxns_per_ctrl
* 2, GFP_KERNEL
);
3436 if (!phba
->ep_array
) {
3437 shost_printk(KERN_ERR
, phba
->shost
,
3438 "Failed to allocate memory in "
3439 "hba_setup_cid_tbls\n");
3440 kfree(phba
->cid_array
);
3443 new_cid
= phba
->fw_config
.iscsi_cid_start
;
3444 for (i
= 0; i
< phba
->params
.cxns_per_ctrl
; i
++) {
3445 phba
->cid_array
[i
] = new_cid
;
3448 phba
->avlbl_cids
= phba
->params
.cxns_per_ctrl
;
3452 static void hwi_enable_intr(struct beiscsi_hba
*phba
)
3454 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
3455 struct hwi_controller
*phwi_ctrlr
;
3456 struct hwi_context_memory
*phwi_context
;
3457 struct be_queue_info
*eq
;
3462 phwi_ctrlr
= phba
->phwi_ctrlr
;
3463 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
3465 addr
= (u8 __iomem
*) ((u8 __iomem
*) ctrl
->pcicfg
+
3466 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET
);
3467 reg
= ioread32(addr
);
3469 enabled
= reg
& MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK
;
3471 reg
|= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK
;
3472 SE_DEBUG(DBG_LVL_8
, "reg =x%08x addr=%p\n", reg
, addr
);
3473 iowrite32(reg
, addr
);
3476 if (!phba
->msix_enabled
) {
3477 eq
= &phwi_context
->be_eq
[0].q
;
3478 SE_DEBUG(DBG_LVL_8
, "eq->id=%d\n", eq
->id
);
3479 hwi_ring_eq_db(phba
, eq
->id
, 0, 0, 1, 1);
3481 for (i
= 0; i
<= phba
->num_cpus
; i
++) {
3482 eq
= &phwi_context
->be_eq
[i
].q
;
3483 SE_DEBUG(DBG_LVL_8
, "eq->id=%d\n", eq
->id
);
3484 hwi_ring_eq_db(phba
, eq
->id
, 0, 0, 1, 1);
3489 static void hwi_disable_intr(struct beiscsi_hba
*phba
)
3491 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
3493 u8 __iomem
*addr
= ctrl
->pcicfg
+ PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET
;
3494 u32 reg
= ioread32(addr
);
3496 u32 enabled
= reg
& MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK
;
3498 reg
&= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK
;
3499 iowrite32(reg
, addr
);
3501 shost_printk(KERN_WARNING
, phba
->shost
,
3502 "In hwi_disable_intr, Already Disabled\n");
3505 static int beiscsi_get_boot_info(struct beiscsi_hba
*phba
)
3507 struct be_cmd_resp_get_boot_target
*boot_resp
;
3508 struct be_cmd_resp_get_session
*session_resp
;
3509 struct be_mcc_wrb
*wrb
;
3510 struct be_dma_mem nonemb_cmd
;
3511 unsigned int tag
, wrb_num
;
3512 unsigned short status
, extd_status
;
3513 struct be_queue_info
*mccq
= &phba
->ctrl
.mcc_obj
.q
;
3515 tag
= beiscsi_get_boot_target(phba
);
3517 SE_DEBUG(DBG_LVL_1
, "be_cmd_get_mac_addr Failed\n");
3520 wait_event_interruptible(phba
->ctrl
.mcc_wait
[tag
],
3521 phba
->ctrl
.mcc_numtag
[tag
]);
3523 wrb_num
= (phba
->ctrl
.mcc_numtag
[tag
] & 0x00FF0000) >> 16;
3524 extd_status
= (phba
->ctrl
.mcc_numtag
[tag
] & 0x0000FF00) >> 8;
3525 status
= phba
->ctrl
.mcc_numtag
[tag
] & 0x000000FF;
3526 if (status
|| extd_status
) {
3527 SE_DEBUG(DBG_LVL_1
, "be_cmd_get_mac_addr Failed"
3528 " status = %d extd_status = %d\n",
3529 status
, extd_status
);
3530 free_mcc_tag(&phba
->ctrl
, tag
);
3533 wrb
= queue_get_wrb(mccq
, wrb_num
);
3534 free_mcc_tag(&phba
->ctrl
, tag
);
3535 boot_resp
= embedded_payload(wrb
);
3537 if (boot_resp
->boot_session_handle
< 0) {
3538 printk(KERN_ERR
"No Boot Session for this pci_func,"
3539 "session Hndl = %d\n", boot_resp
->boot_session_handle
);
3543 nonemb_cmd
.va
= pci_alloc_consistent(phba
->ctrl
.pdev
,
3544 sizeof(*session_resp
),
3546 if (nonemb_cmd
.va
== NULL
) {
3548 "Failed to allocate memory for"
3549 "beiscsi_get_session_info\n");
3553 memset(nonemb_cmd
.va
, 0, sizeof(*session_resp
));
3554 tag
= beiscsi_get_session_info(phba
,
3555 boot_resp
->boot_session_handle
, &nonemb_cmd
);
3557 SE_DEBUG(DBG_LVL_1
, "beiscsi_get_session_info"
3561 wait_event_interruptible(phba
->ctrl
.mcc_wait
[tag
],
3562 phba
->ctrl
.mcc_numtag
[tag
]);
3564 wrb_num
= (phba
->ctrl
.mcc_numtag
[tag
] & 0x00FF0000) >> 16;
3565 extd_status
= (phba
->ctrl
.mcc_numtag
[tag
] & 0x0000FF00) >> 8;
3566 status
= phba
->ctrl
.mcc_numtag
[tag
] & 0x000000FF;
3567 if (status
|| extd_status
) {
3568 SE_DEBUG(DBG_LVL_1
, "beiscsi_get_session_info Failed"
3569 " status = %d extd_status = %d\n",
3570 status
, extd_status
);
3571 free_mcc_tag(&phba
->ctrl
, tag
);
3574 wrb
= queue_get_wrb(mccq
, wrb_num
);
3575 free_mcc_tag(&phba
->ctrl
, tag
);
3576 session_resp
= nonemb_cmd
.va
;
3577 memcpy(&phba
->boot_sess
, &session_resp
->session_info
,
3578 sizeof(struct mgmt_session_info
));
3579 pci_free_consistent(phba
->ctrl
.pdev
, nonemb_cmd
.size
,
3580 nonemb_cmd
.va
, nonemb_cmd
.dma
);
3583 pci_free_consistent(phba
->ctrl
.pdev
, nonemb_cmd
.size
,
3584 nonemb_cmd
.va
, nonemb_cmd
.dma
);
3588 static int beiscsi_init_port(struct beiscsi_hba
*phba
)
3592 ret
= beiscsi_init_controller(phba
);
3594 shost_printk(KERN_ERR
, phba
->shost
,
3595 "beiscsi_dev_probe - Failed in"
3596 "beiscsi_init_controller\n");
3599 ret
= beiscsi_init_sgl_handle(phba
);
3601 shost_printk(KERN_ERR
, phba
->shost
,
3602 "beiscsi_dev_probe - Failed in"
3603 "beiscsi_init_sgl_handle\n");
3604 goto do_cleanup_ctrlr
;
3607 if (hba_setup_cid_tbls(phba
)) {
3608 shost_printk(KERN_ERR
, phba
->shost
,
3609 "Failed in hba_setup_cid_tbls\n");
3610 kfree(phba
->io_sgl_hndl_base
);
3611 kfree(phba
->eh_sgl_hndl_base
);
3612 goto do_cleanup_ctrlr
;
3622 static void hwi_purge_eq(struct beiscsi_hba
*phba
)
3624 struct hwi_controller
*phwi_ctrlr
;
3625 struct hwi_context_memory
*phwi_context
;
3626 struct be_queue_info
*eq
;
3627 struct be_eq_entry
*eqe
= NULL
;
3629 unsigned int num_processed
;
3631 phwi_ctrlr
= phba
->phwi_ctrlr
;
3632 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
3633 if (phba
->msix_enabled
)
3638 for (i
= 0; i
< (phba
->num_cpus
+ eq_msix
); i
++) {
3639 eq
= &phwi_context
->be_eq
[i
].q
;
3640 eqe
= queue_tail_node(eq
);
3642 while (eqe
->dw
[offsetof(struct amap_eq_entry
, valid
) / 32]
3644 AMAP_SET_BITS(struct amap_eq_entry
, valid
, eqe
, 0);
3646 eqe
= queue_tail_node(eq
);
3651 hwi_ring_eq_db(phba
, eq
->id
, 1, num_processed
, 1, 1);
3655 static void beiscsi_clean_port(struct beiscsi_hba
*phba
)
3659 mgmt_status
= mgmt_epfw_cleanup(phba
, CMD_CONNECTION_CHUTE_0
);
3661 shost_printk(KERN_WARNING
, phba
->shost
,
3662 "mgmt_epfw_cleanup FAILED\n");
3666 kfree(phba
->io_sgl_hndl_base
);
3667 kfree(phba
->eh_sgl_hndl_base
);
3668 kfree(phba
->cid_array
);
3669 kfree(phba
->ep_array
);
3673 beiscsi_offload_connection(struct beiscsi_conn
*beiscsi_conn
,
3674 struct beiscsi_offload_params
*params
)
3676 struct wrb_handle
*pwrb_handle
;
3677 struct iscsi_target_context_update_wrb
*pwrb
= NULL
;
3678 struct be_mem_descriptor
*mem_descr
;
3679 struct beiscsi_hba
*phba
= beiscsi_conn
->phba
;
3683 * We can always use 0 here because it is reserved by libiscsi for
3684 * login/startup related tasks.
3686 pwrb_handle
= alloc_wrb_handle(phba
, (beiscsi_conn
->beiscsi_conn_cid
-
3687 phba
->fw_config
.iscsi_cid_start
));
3688 pwrb
= (struct iscsi_target_context_update_wrb
*)pwrb_handle
->pwrb
;
3689 memset(pwrb
, 0, sizeof(*pwrb
));
3690 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
,
3691 max_burst_length
, pwrb
, params
->dw
[offsetof
3692 (struct amap_beiscsi_offload_params
,
3693 max_burst_length
) / 32]);
3694 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
,
3695 max_send_data_segment_length
, pwrb
,
3696 params
->dw
[offsetof(struct amap_beiscsi_offload_params
,
3697 max_send_data_segment_length
) / 32]);
3698 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
,
3701 params
->dw
[offsetof(struct amap_beiscsi_offload_params
,
3702 first_burst_length
) / 32]);
3704 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, erl
, pwrb
,
3705 (params
->dw
[offsetof(struct amap_beiscsi_offload_params
,
3706 erl
) / 32] & OFFLD_PARAMS_ERL
));
3707 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, dde
, pwrb
,
3708 (params
->dw
[offsetof(struct amap_beiscsi_offload_params
,
3709 dde
) / 32] & OFFLD_PARAMS_DDE
) >> 2);
3710 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, hde
, pwrb
,
3711 (params
->dw
[offsetof(struct amap_beiscsi_offload_params
,
3712 hde
) / 32] & OFFLD_PARAMS_HDE
) >> 3);
3713 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, ir2t
, pwrb
,
3714 (params
->dw
[offsetof(struct amap_beiscsi_offload_params
,
3715 ir2t
) / 32] & OFFLD_PARAMS_IR2T
) >> 4);
3716 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, imd
, pwrb
,
3717 (params
->dw
[offsetof(struct amap_beiscsi_offload_params
,
3718 imd
) / 32] & OFFLD_PARAMS_IMD
) >> 5);
3719 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, stat_sn
,
3721 (params
->dw
[offsetof(struct amap_beiscsi_offload_params
,
3722 exp_statsn
) / 32] + 1));
3723 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, type
, pwrb
,
3725 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, wrb_idx
,
3726 pwrb
, pwrb_handle
->wrb_index
);
3727 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, ptr2nextwrb
,
3728 pwrb
, pwrb_handle
->nxt_wrb_index
);
3729 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
,
3730 session_state
, pwrb
, 0);
3731 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, compltonack
,
3733 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, notpredblq
,
3735 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
, mode
, pwrb
,
3738 mem_descr
= phba
->init_mem
;
3739 mem_descr
+= ISCSI_MEM_GLOBAL_HEADER
;
3741 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
,
3742 pad_buffer_addr_hi
, pwrb
,
3743 mem_descr
->mem_array
[0].bus_address
.u
.a32
.address_hi
);
3744 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb
,
3745 pad_buffer_addr_lo
, pwrb
,
3746 mem_descr
->mem_array
[0].bus_address
.u
.a32
.address_lo
);
3748 be_dws_le_to_cpu(pwrb
, sizeof(struct iscsi_target_context_update_wrb
));
3750 doorbell
|= beiscsi_conn
->beiscsi_conn_cid
& DB_WRB_POST_CID_MASK
;
3751 doorbell
|= (pwrb_handle
->wrb_index
& DB_DEF_PDU_WRB_INDEX_MASK
)
3752 << DB_DEF_PDU_WRB_INDEX_SHIFT
;
3753 doorbell
|= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT
;
3755 iowrite32(doorbell
, phba
->db_va
+ DB_TXULP0_OFFSET
);
3758 static void beiscsi_parse_pdu(struct iscsi_conn
*conn
, itt_t itt
,
3759 int *index
, int *age
)
3763 *age
= conn
->session
->age
;
3767 * beiscsi_alloc_pdu - allocates pdu and related resources
3768 * @task: libiscsi task
3769 * @opcode: opcode of pdu for task
3771 * This is called with the session lock held. It will allocate
3772 * the wrb and sgl if needed for the command. And it will prep
3773 * the pdu's itt. beiscsi_parse_pdu will later translate
3774 * the pdu itt to the libiscsi task itt.
3776 static int beiscsi_alloc_pdu(struct iscsi_task
*task
, uint8_t opcode
)
3778 struct beiscsi_io_task
*io_task
= task
->dd_data
;
3779 struct iscsi_conn
*conn
= task
->conn
;
3780 struct beiscsi_conn
*beiscsi_conn
= conn
->dd_data
;
3781 struct beiscsi_hba
*phba
= beiscsi_conn
->phba
;
3782 struct hwi_wrb_context
*pwrb_context
;
3783 struct hwi_controller
*phwi_ctrlr
;
3785 struct beiscsi_session
*beiscsi_sess
= beiscsi_conn
->beiscsi_sess
;
3788 io_task
->cmd_bhs
= pci_pool_alloc(beiscsi_sess
->bhs_pool
,
3789 GFP_ATOMIC
, &paddr
);
3790 if (!io_task
->cmd_bhs
)
3792 io_task
->bhs_pa
.u
.a64
.address
= paddr
;
3793 io_task
->libiscsi_itt
= (itt_t
)task
->itt
;
3794 io_task
->conn
= beiscsi_conn
;
3796 task
->hdr
= (struct iscsi_hdr
*)&io_task
->cmd_bhs
->iscsi_hdr
;
3797 task
->hdr_max
= sizeof(struct be_cmd_bhs
);
3798 io_task
->psgl_handle
= NULL
;
3799 io_task
->psgl_handle
= NULL
;
3802 spin_lock(&phba
->io_sgl_lock
);
3803 io_task
->psgl_handle
= alloc_io_sgl_handle(phba
);
3804 spin_unlock(&phba
->io_sgl_lock
);
3805 if (!io_task
->psgl_handle
)
3807 io_task
->pwrb_handle
= alloc_wrb_handle(phba
,
3808 beiscsi_conn
->beiscsi_conn_cid
-
3809 phba
->fw_config
.iscsi_cid_start
);
3810 if (!io_task
->pwrb_handle
)
3813 io_task
->scsi_cmnd
= NULL
;
3814 if ((opcode
& ISCSI_OPCODE_MASK
) == ISCSI_OP_LOGIN
) {
3815 if (!beiscsi_conn
->login_in_progress
) {
3816 spin_lock(&phba
->mgmt_sgl_lock
);
3817 io_task
->psgl_handle
= (struct sgl_handle
*)
3818 alloc_mgmt_sgl_handle(phba
);
3819 spin_unlock(&phba
->mgmt_sgl_lock
);
3820 if (!io_task
->psgl_handle
)
3823 beiscsi_conn
->login_in_progress
= 1;
3824 beiscsi_conn
->plogin_sgl_handle
=
3825 io_task
->psgl_handle
;
3826 io_task
->pwrb_handle
=
3827 alloc_wrb_handle(phba
,
3828 beiscsi_conn
->beiscsi_conn_cid
-
3829 phba
->fw_config
.iscsi_cid_start
);
3830 if (!io_task
->pwrb_handle
)
3832 beiscsi_conn
->plogin_wrb_handle
=
3833 io_task
->pwrb_handle
;
3836 io_task
->psgl_handle
=
3837 beiscsi_conn
->plogin_sgl_handle
;
3838 io_task
->pwrb_handle
=
3839 beiscsi_conn
->plogin_wrb_handle
;
3842 spin_lock(&phba
->mgmt_sgl_lock
);
3843 io_task
->psgl_handle
= alloc_mgmt_sgl_handle(phba
);
3844 spin_unlock(&phba
->mgmt_sgl_lock
);
3845 if (!io_task
->psgl_handle
)
3847 io_task
->pwrb_handle
=
3848 alloc_wrb_handle(phba
,
3849 beiscsi_conn
->beiscsi_conn_cid
-
3850 phba
->fw_config
.iscsi_cid_start
);
3851 if (!io_task
->pwrb_handle
)
3852 goto free_mgmt_hndls
;
3856 itt
= (itt_t
) cpu_to_be32(((unsigned int)io_task
->pwrb_handle
->
3857 wrb_index
<< 16) | (unsigned int)
3858 (io_task
->psgl_handle
->sgl_index
));
3859 io_task
->pwrb_handle
->pio_handle
= task
;
3861 io_task
->cmd_bhs
->iscsi_hdr
.itt
= itt
;
3865 spin_lock(&phba
->io_sgl_lock
);
3866 free_io_sgl_handle(phba
, io_task
->psgl_handle
);
3867 spin_unlock(&phba
->io_sgl_lock
);
3870 spin_lock(&phba
->mgmt_sgl_lock
);
3871 free_mgmt_sgl_handle(phba
, io_task
->psgl_handle
);
3872 spin_unlock(&phba
->mgmt_sgl_lock
);
3874 phwi_ctrlr
= phba
->phwi_ctrlr
;
3875 pwrb_context
= &phwi_ctrlr
->wrb_context
[
3876 beiscsi_conn
->beiscsi_conn_cid
-
3877 phba
->fw_config
.iscsi_cid_start
];
3878 if (io_task
->pwrb_handle
)
3879 free_wrb_handle(phba
, pwrb_context
, io_task
->pwrb_handle
);
3880 io_task
->pwrb_handle
= NULL
;
3881 pci_pool_free(beiscsi_sess
->bhs_pool
, io_task
->cmd_bhs
,
3882 io_task
->bhs_pa
.u
.a64
.address
);
3883 SE_DEBUG(DBG_LVL_1
, "Alloc of SGL_ICD Failed\n");
3887 static void beiscsi_cleanup_task(struct iscsi_task
*task
)
3889 struct beiscsi_io_task
*io_task
= task
->dd_data
;
3890 struct iscsi_conn
*conn
= task
->conn
;
3891 struct beiscsi_conn
*beiscsi_conn
= conn
->dd_data
;
3892 struct beiscsi_hba
*phba
= beiscsi_conn
->phba
;
3893 struct beiscsi_session
*beiscsi_sess
= beiscsi_conn
->beiscsi_sess
;
3894 struct hwi_wrb_context
*pwrb_context
;
3895 struct hwi_controller
*phwi_ctrlr
;
3897 phwi_ctrlr
= phba
->phwi_ctrlr
;
3898 pwrb_context
= &phwi_ctrlr
->wrb_context
[beiscsi_conn
->beiscsi_conn_cid
3899 - phba
->fw_config
.iscsi_cid_start
];
3900 if (io_task
->pwrb_handle
) {
3901 free_wrb_handle(phba
, pwrb_context
, io_task
->pwrb_handle
);
3902 io_task
->pwrb_handle
= NULL
;
3905 if (io_task
->cmd_bhs
) {
3906 pci_pool_free(beiscsi_sess
->bhs_pool
, io_task
->cmd_bhs
,
3907 io_task
->bhs_pa
.u
.a64
.address
);
3911 if (io_task
->psgl_handle
) {
3912 spin_lock(&phba
->io_sgl_lock
);
3913 free_io_sgl_handle(phba
, io_task
->psgl_handle
);
3914 spin_unlock(&phba
->io_sgl_lock
);
3915 io_task
->psgl_handle
= NULL
;
3919 ((task
->hdr
->opcode
& ISCSI_OPCODE_MASK
) == ISCSI_OP_LOGIN
))
3921 if (io_task
->psgl_handle
) {
3922 spin_lock(&phba
->mgmt_sgl_lock
);
3923 free_mgmt_sgl_handle(phba
, io_task
->psgl_handle
);
3924 spin_unlock(&phba
->mgmt_sgl_lock
);
3925 io_task
->psgl_handle
= NULL
;
3930 static int beiscsi_iotask(struct iscsi_task
*task
, struct scatterlist
*sg
,
3931 unsigned int num_sg
, unsigned int xferlen
,
3932 unsigned int writedir
)
3935 struct beiscsi_io_task
*io_task
= task
->dd_data
;
3936 struct iscsi_conn
*conn
= task
->conn
;
3937 struct beiscsi_conn
*beiscsi_conn
= conn
->dd_data
;
3938 struct beiscsi_hba
*phba
= beiscsi_conn
->phba
;
3939 struct iscsi_wrb
*pwrb
= NULL
;
3940 unsigned int doorbell
= 0;
3942 pwrb
= io_task
->pwrb_handle
->pwrb
;
3943 io_task
->cmd_bhs
->iscsi_hdr
.exp_statsn
= 0;
3944 io_task
->bhs_len
= sizeof(struct be_cmd_bhs
);
3947 memset(&io_task
->cmd_bhs
->iscsi_data_pdu
, 0, 48);
3948 AMAP_SET_BITS(struct amap_pdu_data_out
, itt
,
3949 &io_task
->cmd_bhs
->iscsi_data_pdu
,
3950 (unsigned int)io_task
->cmd_bhs
->iscsi_hdr
.itt
);
3951 AMAP_SET_BITS(struct amap_pdu_data_out
, opcode
,
3952 &io_task
->cmd_bhs
->iscsi_data_pdu
,
3953 ISCSI_OPCODE_SCSI_DATA_OUT
);
3954 AMAP_SET_BITS(struct amap_pdu_data_out
, final_bit
,
3955 &io_task
->cmd_bhs
->iscsi_data_pdu
, 1);
3956 AMAP_SET_BITS(struct amap_iscsi_wrb
, type
, pwrb
,
3958 AMAP_SET_BITS(struct amap_iscsi_wrb
, dsp
, pwrb
, 1);
3960 AMAP_SET_BITS(struct amap_iscsi_wrb
, type
, pwrb
,
3962 AMAP_SET_BITS(struct amap_iscsi_wrb
, dsp
, pwrb
, 0);
3964 memcpy(&io_task
->cmd_bhs
->iscsi_data_pdu
.
3965 dw
[offsetof(struct amap_pdu_data_out
, lun
) / 32],
3966 &io_task
->cmd_bhs
->iscsi_hdr
.lun
, sizeof(struct scsi_lun
));
3968 AMAP_SET_BITS(struct amap_iscsi_wrb
, lun
, pwrb
,
3969 cpu_to_be16(*(unsigned short *)&io_task
->cmd_bhs
->iscsi_hdr
.lun
));
3970 AMAP_SET_BITS(struct amap_iscsi_wrb
, r2t_exp_dtl
, pwrb
, xferlen
);
3971 AMAP_SET_BITS(struct amap_iscsi_wrb
, wrb_idx
, pwrb
,
3972 io_task
->pwrb_handle
->wrb_index
);
3973 AMAP_SET_BITS(struct amap_iscsi_wrb
, cmdsn_itt
, pwrb
,
3974 be32_to_cpu(task
->cmdsn
));
3975 AMAP_SET_BITS(struct amap_iscsi_wrb
, sgl_icd_idx
, pwrb
,
3976 io_task
->psgl_handle
->sgl_index
);
3978 hwi_write_sgl(pwrb
, sg
, num_sg
, io_task
);
3980 AMAP_SET_BITS(struct amap_iscsi_wrb
, ptr2nextwrb
, pwrb
,
3981 io_task
->pwrb_handle
->nxt_wrb_index
);
3982 be_dws_le_to_cpu(pwrb
, sizeof(struct iscsi_wrb
));
3984 doorbell
|= beiscsi_conn
->beiscsi_conn_cid
& DB_WRB_POST_CID_MASK
;
3985 doorbell
|= (io_task
->pwrb_handle
->wrb_index
&
3986 DB_DEF_PDU_WRB_INDEX_MASK
) << DB_DEF_PDU_WRB_INDEX_SHIFT
;
3987 doorbell
|= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT
;
3989 iowrite32(doorbell
, phba
->db_va
+ DB_TXULP0_OFFSET
);
3993 static int beiscsi_mtask(struct iscsi_task
*task
)
3995 struct beiscsi_io_task
*io_task
= task
->dd_data
;
3996 struct iscsi_conn
*conn
= task
->conn
;
3997 struct beiscsi_conn
*beiscsi_conn
= conn
->dd_data
;
3998 struct beiscsi_hba
*phba
= beiscsi_conn
->phba
;
3999 struct iscsi_wrb
*pwrb
= NULL
;
4000 unsigned int doorbell
= 0;
4003 cid
= beiscsi_conn
->beiscsi_conn_cid
;
4004 pwrb
= io_task
->pwrb_handle
->pwrb
;
4005 memset(pwrb
, 0, sizeof(*pwrb
));
4006 AMAP_SET_BITS(struct amap_iscsi_wrb
, cmdsn_itt
, pwrb
,
4007 be32_to_cpu(task
->cmdsn
));
4008 AMAP_SET_BITS(struct amap_iscsi_wrb
, wrb_idx
, pwrb
,
4009 io_task
->pwrb_handle
->wrb_index
);
4010 AMAP_SET_BITS(struct amap_iscsi_wrb
, sgl_icd_idx
, pwrb
,
4011 io_task
->psgl_handle
->sgl_index
);
4013 switch (task
->hdr
->opcode
& ISCSI_OPCODE_MASK
) {
4014 case ISCSI_OP_LOGIN
:
4015 AMAP_SET_BITS(struct amap_iscsi_wrb
, type
, pwrb
,
4017 AMAP_SET_BITS(struct amap_iscsi_wrb
, dmsg
, pwrb
, 0);
4018 AMAP_SET_BITS(struct amap_iscsi_wrb
, cmdsn_itt
, pwrb
, 1);
4019 hwi_write_buffer(pwrb
, task
);
4021 case ISCSI_OP_NOOP_OUT
:
4022 if (task
->hdr
->ttt
!= ISCSI_RESERVED_TAG
) {
4023 AMAP_SET_BITS(struct amap_iscsi_wrb
, type
, pwrb
,
4025 AMAP_SET_BITS(struct amap_iscsi_wrb
, cmdsn_itt
,
4027 AMAP_SET_BITS(struct amap_iscsi_wrb
, dmsg
, pwrb
, 0);
4029 AMAP_SET_BITS(struct amap_iscsi_wrb
, type
, pwrb
,
4031 AMAP_SET_BITS(struct amap_iscsi_wrb
, dmsg
, pwrb
, 1);
4033 hwi_write_buffer(pwrb
, task
);
4036 AMAP_SET_BITS(struct amap_iscsi_wrb
, type
, pwrb
,
4038 AMAP_SET_BITS(struct amap_iscsi_wrb
, dmsg
, pwrb
, 0);
4039 hwi_write_buffer(pwrb
, task
);
4041 case ISCSI_OP_SCSI_TMFUNC
:
4042 AMAP_SET_BITS(struct amap_iscsi_wrb
, type
, pwrb
,
4044 AMAP_SET_BITS(struct amap_iscsi_wrb
, dmsg
, pwrb
, 0);
4045 hwi_write_buffer(pwrb
, task
);
4047 case ISCSI_OP_LOGOUT
:
4048 AMAP_SET_BITS(struct amap_iscsi_wrb
, dmsg
, pwrb
, 0);
4049 AMAP_SET_BITS(struct amap_iscsi_wrb
, type
, pwrb
,
4051 hwi_write_buffer(pwrb
, task
);
4055 SE_DEBUG(DBG_LVL_1
, "opcode =%d Not supported\n",
4056 task
->hdr
->opcode
& ISCSI_OPCODE_MASK
);
4060 AMAP_SET_BITS(struct amap_iscsi_wrb
, r2t_exp_dtl
, pwrb
,
4062 AMAP_SET_BITS(struct amap_iscsi_wrb
, ptr2nextwrb
, pwrb
,
4063 io_task
->pwrb_handle
->nxt_wrb_index
);
4064 be_dws_le_to_cpu(pwrb
, sizeof(struct iscsi_wrb
));
4066 doorbell
|= cid
& DB_WRB_POST_CID_MASK
;
4067 doorbell
|= (io_task
->pwrb_handle
->wrb_index
&
4068 DB_DEF_PDU_WRB_INDEX_MASK
) << DB_DEF_PDU_WRB_INDEX_SHIFT
;
4069 doorbell
|= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT
;
4070 iowrite32(doorbell
, phba
->db_va
+ DB_TXULP0_OFFSET
);
4074 static int beiscsi_task_xmit(struct iscsi_task
*task
)
4076 struct beiscsi_io_task
*io_task
= task
->dd_data
;
4077 struct scsi_cmnd
*sc
= task
->sc
;
4078 struct scatterlist
*sg
;
4080 unsigned int writedir
= 0, xferlen
= 0;
4083 return beiscsi_mtask(task
);
4085 io_task
->scsi_cmnd
= sc
;
4086 num_sg
= scsi_dma_map(sc
);
4088 SE_DEBUG(DBG_LVL_1
, " scsi_dma_map Failed\n")
4091 xferlen
= scsi_bufflen(sc
);
4092 sg
= scsi_sglist(sc
);
4093 if (sc
->sc_data_direction
== DMA_TO_DEVICE
) {
4095 SE_DEBUG(DBG_LVL_4
, "task->imm_count=0x%08x\n",
4099 return beiscsi_iotask(task
, sg
, num_sg
, xferlen
, writedir
);
4102 static void beiscsi_remove(struct pci_dev
*pcidev
)
4104 struct beiscsi_hba
*phba
= NULL
;
4105 struct hwi_controller
*phwi_ctrlr
;
4106 struct hwi_context_memory
*phwi_context
;
4107 struct be_eq_obj
*pbe_eq
;
4108 unsigned int i
, msix_vec
;
4109 u8
*real_offset
= 0;
4112 phba
= (struct beiscsi_hba
*)pci_get_drvdata(pcidev
);
4114 dev_err(&pcidev
->dev
, "beiscsi_remove called with no phba\n");
4118 phwi_ctrlr
= phba
->phwi_ctrlr
;
4119 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
4120 hwi_disable_intr(phba
);
4121 if (phba
->msix_enabled
) {
4122 for (i
= 0; i
<= phba
->num_cpus
; i
++) {
4123 msix_vec
= phba
->msix_entries
[i
].vector
;
4124 free_irq(msix_vec
, &phwi_context
->be_eq
[i
]);
4127 if (phba
->pcidev
->irq
)
4128 free_irq(phba
->pcidev
->irq
, phba
);
4129 pci_disable_msix(phba
->pcidev
);
4130 destroy_workqueue(phba
->wq
);
4131 if (blk_iopoll_enabled
)
4132 for (i
= 0; i
< phba
->num_cpus
; i
++) {
4133 pbe_eq
= &phwi_context
->be_eq
[i
];
4134 blk_iopoll_disable(&pbe_eq
->iopoll
);
4137 beiscsi_clean_port(phba
);
4138 beiscsi_free_mem(phba
);
4139 real_offset
= (u8
*)phba
->csr_va
+ MPU_EP_SEMAPHORE
;
4141 value
= readl((void *)real_offset
);
4143 if (value
& 0x00010000) {
4144 value
&= 0xfffeffff;
4145 writel(value
, (void *)real_offset
);
4147 beiscsi_unmap_pci_function(phba
);
4148 pci_free_consistent(phba
->pcidev
,
4149 phba
->ctrl
.mbox_mem_alloced
.size
,
4150 phba
->ctrl
.mbox_mem_alloced
.va
,
4151 phba
->ctrl
.mbox_mem_alloced
.dma
);
4152 if (phba
->boot_kset
)
4153 iscsi_boot_destroy_kset(phba
->boot_kset
);
4154 iscsi_host_remove(phba
->shost
);
4155 pci_dev_put(phba
->pcidev
);
4156 iscsi_host_free(phba
->shost
);
4159 static void beiscsi_msix_enable(struct beiscsi_hba
*phba
)
4163 for (i
= 0; i
<= phba
->num_cpus
; i
++)
4164 phba
->msix_entries
[i
].entry
= i
;
4166 status
= pci_enable_msix(phba
->pcidev
, phba
->msix_entries
,
4167 (phba
->num_cpus
+ 1));
4169 phba
->msix_enabled
= true;
4174 static int __devinit
beiscsi_dev_probe(struct pci_dev
*pcidev
,
4175 const struct pci_device_id
*id
)
4177 struct beiscsi_hba
*phba
= NULL
;
4178 struct hwi_controller
*phwi_ctrlr
;
4179 struct hwi_context_memory
*phwi_context
;
4180 struct be_eq_obj
*pbe_eq
;
4181 int ret
, num_cpus
, i
;
4182 u8
*real_offset
= 0;
4185 ret
= beiscsi_enable_pci(pcidev
);
4187 dev_err(&pcidev
->dev
, "beiscsi_dev_probe-"
4188 " Failed to enable pci device\n");
4192 phba
= beiscsi_hba_alloc(pcidev
);
4194 dev_err(&pcidev
->dev
, "beiscsi_dev_probe-"
4195 " Failed in beiscsi_hba_alloc\n");
4199 switch (pcidev
->device
) {
4203 phba
->generation
= BE_GEN2
;
4207 phba
->generation
= BE_GEN3
;
4210 phba
->generation
= 0;
4214 num_cpus
= find_num_cpus();
4217 phba
->num_cpus
= num_cpus
;
4218 SE_DEBUG(DBG_LVL_8
, "num_cpus = %d\n", phba
->num_cpus
);
4221 beiscsi_msix_enable(phba
);
4222 ret
= be_ctrl_init(phba
, pcidev
);
4224 shost_printk(KERN_ERR
, phba
->shost
, "beiscsi_dev_probe-"
4225 "Failed in be_ctrl_init\n");
4230 real_offset
= (u8
*)phba
->csr_va
+ MPU_EP_SEMAPHORE
;
4231 value
= readl((void *)real_offset
);
4232 if (value
& 0x00010000) {
4234 shost_printk(KERN_ERR
, phba
->shost
,
4235 "Loading Driver in crashdump mode\n");
4236 ret
= beiscsi_pci_soft_reset(phba
);
4238 shost_printk(KERN_ERR
, phba
->shost
,
4239 "Reset Failed. Aborting Crashdump\n");
4242 ret
= be_chk_reset_complete(phba
);
4244 shost_printk(KERN_ERR
, phba
->shost
,
4245 "Failed to get out of reset."
4246 "Aborting Crashdump\n");
4250 value
|= 0x00010000;
4251 writel(value
, (void *)real_offset
);
4256 spin_lock_init(&phba
->io_sgl_lock
);
4257 spin_lock_init(&phba
->mgmt_sgl_lock
);
4258 spin_lock_init(&phba
->isr_lock
);
4259 ret
= mgmt_get_fw_config(&phba
->ctrl
, phba
);
4261 shost_printk(KERN_ERR
, phba
->shost
,
4262 "Error getting fw config\n");
4265 phba
->shost
->max_id
= phba
->fw_config
.iscsi_cid_count
;
4266 beiscsi_get_params(phba
);
4267 phba
->shost
->can_queue
= phba
->params
.ios_per_ctrl
;
4268 ret
= beiscsi_init_port(phba
);
4270 shost_printk(KERN_ERR
, phba
->shost
, "beiscsi_dev_probe-"
4271 "Failed in beiscsi_init_port\n");
4275 for (i
= 0; i
< MAX_MCC_CMD
; i
++) {
4276 init_waitqueue_head(&phba
->ctrl
.mcc_wait
[i
+ 1]);
4277 phba
->ctrl
.mcc_tag
[i
] = i
+ 1;
4278 phba
->ctrl
.mcc_numtag
[i
+ 1] = 0;
4279 phba
->ctrl
.mcc_tag_available
++;
4282 phba
->ctrl
.mcc_alloc_index
= phba
->ctrl
.mcc_free_index
= 0;
4284 snprintf(phba
->wq_name
, sizeof(phba
->wq_name
), "beiscsi_q_irq%u",
4285 phba
->shost
->host_no
);
4286 phba
->wq
= alloc_workqueue(phba
->wq_name
, WQ_MEM_RECLAIM
, 1);
4288 shost_printk(KERN_ERR
, phba
->shost
, "beiscsi_dev_probe-"
4289 "Failed to allocate work queue\n");
4293 INIT_WORK(&phba
->work_cqs
, beiscsi_process_all_cqs
);
4295 phwi_ctrlr
= phba
->phwi_ctrlr
;
4296 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
4297 if (blk_iopoll_enabled
) {
4298 for (i
= 0; i
< phba
->num_cpus
; i
++) {
4299 pbe_eq
= &phwi_context
->be_eq
[i
];
4300 blk_iopoll_init(&pbe_eq
->iopoll
, be_iopoll_budget
,
4302 blk_iopoll_enable(&pbe_eq
->iopoll
);
4305 ret
= beiscsi_init_irqs(phba
);
4307 shost_printk(KERN_ERR
, phba
->shost
, "beiscsi_dev_probe-"
4308 "Failed to beiscsi_init_irqs\n");
4311 hwi_enable_intr(phba
);
4312 ret
= beiscsi_get_boot_info(phba
);
4314 shost_printk(KERN_ERR
, phba
->shost
, "beiscsi_dev_probe-"
4315 "No Boot Devices !!!!!\n");
4317 SE_DEBUG(DBG_LVL_8
, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
4321 destroy_workqueue(phba
->wq
);
4322 if (blk_iopoll_enabled
)
4323 for (i
= 0; i
< phba
->num_cpus
; i
++) {
4324 pbe_eq
= &phwi_context
->be_eq
[i
];
4325 blk_iopoll_disable(&pbe_eq
->iopoll
);
4328 beiscsi_clean_port(phba
);
4329 beiscsi_free_mem(phba
);
4331 real_offset
= (u8
*)phba
->csr_va
+ MPU_EP_SEMAPHORE
;
4333 value
= readl((void *)real_offset
);
4335 if (value
& 0x00010000) {
4336 value
&= 0xfffeffff;
4337 writel(value
, (void *)real_offset
);
4340 pci_free_consistent(phba
->pcidev
,
4341 phba
->ctrl
.mbox_mem_alloced
.size
,
4342 phba
->ctrl
.mbox_mem_alloced
.va
,
4343 phba
->ctrl
.mbox_mem_alloced
.dma
);
4344 beiscsi_unmap_pci_function(phba
);
4346 if (phba
->msix_enabled
)
4347 pci_disable_msix(phba
->pcidev
);
4348 iscsi_host_remove(phba
->shost
);
4349 pci_dev_put(phba
->pcidev
);
4350 iscsi_host_free(phba
->shost
);
4352 pci_disable_device(pcidev
);
4356 struct iscsi_transport beiscsi_iscsi_transport
= {
4357 .owner
= THIS_MODULE
,
4359 .caps
= CAP_RECOVERY_L0
| CAP_HDRDGST
| CAP_TEXT_NEGO
|
4360 CAP_MULTI_R2T
| CAP_DATADGST
| CAP_DATA_PATH_OFFLOAD
,
4361 .param_mask
= ISCSI_MAX_RECV_DLENGTH
|
4362 ISCSI_MAX_XMIT_DLENGTH
|
4365 ISCSI_INITIAL_R2T_EN
|
4370 ISCSI_PDU_INORDER_EN
|
4371 ISCSI_DATASEQ_INORDER_EN
|
4374 ISCSI_CONN_ADDRESS
|
4376 ISCSI_PERSISTENT_PORT
|
4377 ISCSI_PERSISTENT_ADDRESS
|
4378 ISCSI_TARGET_NAME
| ISCSI_TPGT
|
4379 ISCSI_USERNAME
| ISCSI_PASSWORD
|
4380 ISCSI_USERNAME_IN
| ISCSI_PASSWORD_IN
|
4381 ISCSI_FAST_ABORT
| ISCSI_ABORT_TMO
|
4382 ISCSI_LU_RESET_TMO
|
4383 ISCSI_PING_TMO
| ISCSI_RECV_TMO
|
4384 ISCSI_IFACE_NAME
| ISCSI_INITIATOR_NAME
,
4385 .host_param_mask
= ISCSI_HOST_HWADDRESS
| ISCSI_HOST_IPADDRESS
|
4386 ISCSI_HOST_INITIATOR_NAME
,
4387 .create_session
= beiscsi_session_create
,
4388 .destroy_session
= beiscsi_session_destroy
,
4389 .create_conn
= beiscsi_conn_create
,
4390 .bind_conn
= beiscsi_conn_bind
,
4391 .destroy_conn
= iscsi_conn_teardown
,
4392 .set_param
= beiscsi_set_param
,
4393 .get_conn_param
= iscsi_conn_get_param
,
4394 .get_session_param
= iscsi_session_get_param
,
4395 .get_host_param
= beiscsi_get_host_param
,
4396 .start_conn
= beiscsi_conn_start
,
4397 .stop_conn
= iscsi_conn_stop
,
4398 .send_pdu
= iscsi_conn_send_pdu
,
4399 .xmit_task
= beiscsi_task_xmit
,
4400 .cleanup_task
= beiscsi_cleanup_task
,
4401 .alloc_pdu
= beiscsi_alloc_pdu
,
4402 .parse_pdu_itt
= beiscsi_parse_pdu
,
4403 .get_stats
= beiscsi_conn_get_stats
,
4404 .get_ep_param
= beiscsi_ep_get_param
,
4405 .ep_connect
= beiscsi_ep_connect
,
4406 .ep_poll
= beiscsi_ep_poll
,
4407 .ep_disconnect
= beiscsi_ep_disconnect
,
4408 .session_recovery_timedout
= iscsi_session_recovery_timedout
,
4411 static struct pci_driver beiscsi_pci_driver
= {
4413 .probe
= beiscsi_dev_probe
,
4414 .remove
= beiscsi_remove
,
4415 .id_table
= beiscsi_pci_id_table
4419 static int __init
beiscsi_module_init(void)
4423 beiscsi_scsi_transport
=
4424 iscsi_register_transport(&beiscsi_iscsi_transport
);
4425 if (!beiscsi_scsi_transport
) {
4427 "beiscsi_module_init - Unable to register beiscsi"
4431 SE_DEBUG(DBG_LVL_8
, "In beiscsi_module_init, tt=%p\n",
4432 &beiscsi_iscsi_transport
);
4434 ret
= pci_register_driver(&beiscsi_pci_driver
);
4437 "beiscsi_module_init - Unable to register"
4438 "beiscsi pci driver.\n");
4439 goto unregister_iscsi_transport
;
4443 unregister_iscsi_transport
:
4444 iscsi_unregister_transport(&beiscsi_iscsi_transport
);
4448 static void __exit
beiscsi_module_exit(void)
4450 pci_unregister_driver(&beiscsi_pci_driver
);
4451 iscsi_unregister_transport(&beiscsi_iscsi_transport
);
module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);