[SCSI] be2iscsi: correction in the calculation for num_cxn_wrb
drivers/scsi/be2iscsi/be_main.c
/**
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;

MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size, "Maximum size (in kilobytes) of physically "
		 "contiguous memory that can be allocated. "
		 "Range is 16 - 128");
static int beiscsi_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}
/*------------------- PCI Driver operations and data ----------------- */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.eh_abort_handler = iscsi_eh_abort,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_target_reset,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
};

static struct scsi_transport_template *beiscsi_scsi_transport;
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev, "beiscsi_hba_alloc - "
			"iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->dma_boundary = pcidev->dma_mask;
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);

	if (iscsi_host_add(shost, &phba->pcidev->dev))
		goto free_devices;
	return phba;

free_devices:
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	return NULL;
}
static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}
static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

	addr = ioremap_nocache(pci_resource_start(pcidev, 1),
			       pci_resource_len(pcidev, 1));
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}
static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
			"failed. Returning -ENODEV\n");
		return ret;
	}

	pci_set_master(pcidev);
	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			pci_disable_device(pcidev);
			return ret;
		}
	}
	return 0;
}
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		status = -ENOMEM;
		return status;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);
	spin_lock_init(&phba->ctrl.mcc_cq_lock);

	return status;
}
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
				    - (phba->fw_config.iscsi_cid_count
				    + BE2_TMFS
				    + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.eq_timer = 64;
	phba->params.num_eq_entries =
	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
	       + BE2_TMFS) / 512) + 1) * 512;
	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
				      ? 1024 : phba->params.num_eq_entries;
	SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
		 phba->params.num_eq_entries);
	phba->params.num_cq_entries =
	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
	       + BE2_TMFS) / 512) + 1) * 512;
	phba->params.wrbs_per_cxn = 256;
}
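
/*
 * The calculation this commit corrects lives in the WRB ring setup
 * (beiscsi_create_wrb_rings(), beyond this excerpt), which consumes
 * wrbs_per_cxn set above. A sketch of the intended arithmetic, assuming a
 * DMA fragment of mem_size bytes holds the WRB array:
 *
 *	num_cxn_wrb = (mem_size / (sizeof(struct iscsi_wrb) *
 *		       phba->params.wrbs_per_cxn)) * phba->params.wrbs_per_cxn;
 *
 * i.e. round down to a whole number of connections' worth of WRBs, so that
 * no connection's WRBs straddle two fragments.
 */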
static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	val |= id & DB_EQ_RING_ID_MASK;
	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}
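
/*
 * The EQ doorbell packs everything into a single 32-bit write: the ring id
 * in the low bits, single-bit rearm/clear/event flags, then the count of
 * consumed entries. For example, acking 3 events on EQ id 5 with rearm and
 * event set writes
 *
 *	5 | 1 << DB_EQ_REARM_SHIFT | 1 << DB_EQ_EVNT_SHIFT |
 *	3 << DB_EQ_NUM_POPPED_SHIFT
 *
 * to DB_EQ_OFFSET; the exact shift values are defined in be_main.h.
 */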
/**
 * be_isr_mcc - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_eq_processed = 0;

	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_mcc_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}
	if (phba->todo_mcc_cq)
		queue_work(phba->wq, &phba->work_cqs);
	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

	return IRQ_HANDLED;
}
/**
 * be_isr_msix - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	cq = pbe_eq->cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	phba = pbe_eq->phba;
	num_eq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
				blk_iopoll_sched(&pbe_eq->iopoll);

			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);

		return IRQ_HANDLED;
	} else {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (phba->todo_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

		return IRQ_HANDLED;
	}
}
/**
 * be_isr - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	struct be_queue_info *mcc;
	unsigned long flags, index;
	unsigned int num_mcceq_processed, num_ioeq_processed;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr;

	phba = dev_id;
	ctrl = &phba->ctrl;
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	index = 0;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_ioeq_processed = 0;
	num_mcceq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) == mcc->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
				num_mcceq_processed++;
			} else {
				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
					blk_iopoll_sched(&pbe_eq->iopoll);
				num_ioeq_processed++;
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
		}
		if (num_ioeq_processed || num_mcceq_processed) {
			if (phba->todo_mcc_cq)
				queue_work(phba->wq, &phba->work_cqs);

			if ((num_mcceq_processed) && (!num_ioeq_processed))
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 1, 1);
			else
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 0, 1);

			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	} else {
		cq = &phwi_context->be_cq[0];
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {

			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) != cq->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			} else {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_ioeq_processed++;
		}
		if (phba->todo_cq || phba->todo_mcc_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_ioeq_processed) {
			hwi_ring_eq_db(phba, eq->id, 0,
				       num_ioeq_processed, 1, 1);
			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	}
}
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, msix_vec, i = 0;
	char desc[32];

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (phba->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			sprintf(desc, "beiscsi_msix_%04x", i);
			msix_vec = phba->msix_entries[i].vector;
			ret = request_irq(msix_vec, be_isr_msix, 0, desc,
					  &phwi_context->be_eq[i]);
		}
		msix_vec = phba->msix_entries[i].vector;
		ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
				  &phwi_context->be_eq[i]);
	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
				     "Failed to register irq\n");
			return ret;
		}
	}
	return 0;
}
static void hwi_ring_cq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	val |= id & DB_CQ_RING_ID_MASK;
	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}
static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
			  struct beiscsi_hba *phba,
			  unsigned short cid,
			  struct pdu_base *ppdu,
			  unsigned long pdu_len,
			  void *pbuffer, unsigned long buf_len)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;

	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
						PDUBASE_OPCODE_MASK) {
	case ISCSI_OP_NOOP_IN:
		pbuffer = NULL;
		buf_len = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		WARN_ON(!pbuffer);
		WARN_ON(!(buf_len == 48));
		SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)ppdu;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		shost_printk(KERN_WARNING, phba->shost,
			     "Unrecognized opcode 0x%x in async msg\n",
			     (ppdu->
			      dw[offsetof(struct amap_pdu_base, opcode) / 32]
			      & PDUBASE_OPCODE_MASK));
		return 1;
	}

	spin_lock_bh(&session->lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
	spin_unlock_bh(&session->lock);
	return 0;
}
static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->io_sgl_hndl_avbl) {
		SE_DEBUG(DBG_LVL_8,
			 "In alloc_io_sgl_handle, io_sgl_alloc_index=%d\n",
			 phba->io_sgl_alloc_index);
		psgl_handle = phba->io_sgl_hndl_base[phba->
						io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index == (phba->params.
						 ios_per_ctrl - 1))
			phba->io_sgl_alloc_index = 0;
		else
			phba->io_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}
static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle, io_sgl_free_index=%d\n",
		 phba->io_sgl_free_index);
	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		SE_DEBUG(DBG_LVL_8,
			 "Double free in IO SGL, io_sgl_free_index=%d, "
			 "value there=%p\n", phba->io_sgl_free_index,
			 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
		return;
	}
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
	else
		phba->io_sgl_free_index++;
}
/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cid];
	if (pwrb_context->wrb_handles_available >= 2) {
		pwrb_handle = pwrb_context->pwrb_handle_base[
					    pwrb_context->alloc_index];
		pwrb_context->wrb_handles_available--;
		if (pwrb_context->alloc_index ==
					(phba->params.wrbs_per_cxn - 1))
			pwrb_context->alloc_index = 0;
		else
			pwrb_context->alloc_index++;
		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
					    pwrb_context->alloc_index];
		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
	} else
		pwrb_handle = NULL;
	return pwrb_handle;
}
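
/*
 * Why "available >= 2": the WRB just handed out must point at a valid next
 * slot, and nxt_wrb_index is read from whatever handle alloc_index lands on
 * after the increment. With wrbs_per_cxn = 256, allocating at index 255
 * wraps alloc_index to 0 and chains the new WRB to slot 0's wrb_index; if
 * only one handle were left, the "next" slot would be the one just taken.
 */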
/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;

	SE_DEBUG(DBG_LVL_8,
		 "FREE WRB: pwrb_handle=%p free_index=0x%x "
		 "wrb_handles_available=%d\n",
		 pwrb_handle, pwrb_context->free_index,
		 pwrb_context->wrb_handles_available);
}
static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
			 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle, eh_sgl_free_index=%d\n",
		 phba->eh_sgl_free_index);
	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		SE_DEBUG(DBG_LVL_8,
			 "Double free in eh SGL, eh_sgl_free_index=%d\n",
			 phba->eh_sgl_free_index);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
}
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned int sense_len;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = (psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK);
	max_cmdsn = ((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
				/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
						& SOL_RESP_MASK) >> 16);
	status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
						& SOL_STS_MASK) >> 8);
	flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
						& SOL_FLAGS_MASK) >> 24) | 0x80;

	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
				32] & SOL_RES_CNT_MASK);

		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = cpu_to_be16(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
							& SOL_RES_CNT_MASK)
			conn->rxdata_octets += (psol->
			    dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
			    & SOL_RES_CNT_MASK);
	}
unmap:
	scsi_dma_unmap(io_task->scsi_cmnd);
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}
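
/*
 * exp_cmdsn/max_cmdsn above implement the iSCSI command window: the CQE
 * carries ExpCmdSN and the command window size, and per RFC 3720
 * MaxCmdSN = ExpCmdSN + CmdWindow - 1. For example, ExpCmdSN 10 with a
 * window of 4 yields MaxCmdSN 13, i.e. sequence numbers 10..13 may be sent.
 */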
static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
					/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				    i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->
				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;

	io_task = task->dd_data;
	spin_lock(&phba->mgmt_sgl_lock);
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock(&phba->mgmt_sgl_lock);
	spin_lock_bh(&session->lock);
	free_wrb_handle(phba, pwrb_context, pwrb_handle);
	spin_unlock_bh(&session->lock);
}
static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
			& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle;
	struct iscsi_wrb *pwrb = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	unsigned int type;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
				(struct amap_sol_cqe, cid) / 32]
				& SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;
	pwrb = pwrb_handle->pwrb;
	type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
		WRB_TYPE_MASK) >> 28;

	spin_lock_bh(&session->lock);
	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		    ISCSI_OP_NOOP_OUT) {
			be_complete_nopin_resp(beiscsi_conn, task, psol);
		} else
			be_complete_io(beiscsi_conn, task, psol);
		break;

	case HWH_TYPE_LOGOUT:
		be_complete_logout(beiscsi_conn, task, psol);
		break;

	case HWH_TYPE_LOGIN:
		SE_DEBUG(DBG_LVL_1,
			 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
			 " - Solicited path\n");
		break;

	case HWH_TYPE_TMF:
		be_complete_tmf(beiscsi_conn, task, psol);
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, psol);
		break;

	default:
		shost_printk(KERN_WARNING, phba->shost,
			     "In hwi_complete_cmd, unknown type = %d "
			     "wrb_index 0x%x CID 0x%x\n", type,
			     ((psol->dw[offsetof(struct amap_iscsi_wrb,
			     type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
			     ((psol->dw[offsetof(struct amap_sol_cqe,
			     cid) / 32] & SOL_CID_MASK) >> 6));
		break;
	}

	spin_unlock_bh(&session->lock);
}
static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
					  *pasync_ctx, unsigned int is_header,
					  unsigned int host_write_ptr)
{
	if (is_header)
		return &pasync_ctx->async_entry[host_write_ptr].
		    header_busy_list;
	else
		return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
}
static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	int buffer_len = 0;
	unsigned char buffer_index = -1;
	unsigned char is_header = 0;

	phys_addr.u.a32.address_lo =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
	    ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
						& PDUCQE_DPL_MASK) >> 16);
	phys_addr.u.a32.address_hi =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];

	phys_addr.u.a64.address =
			*((unsigned long long *)(&phys_addr.u.a64.address));

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
			& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK));

		buffer_len = (unsigned int)(phys_addr.u.a64.address -
			    pasync_ctx->async_header.pa_base.u.a64.address);

		buffer_index = buffer_len /
			    pasync_ctx->async_header.buffer_size;

		break;
	case UNSOL_DATA_NOTIFY:
		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
					dw[offsetof(struct amap_i_t_dpdu_cqe,
					index) / 32] & PDUCQE_INDEX_MASK));
		buffer_len = (unsigned long)(phys_addr.u.a64.address -
					pasync_ctx->async_data.pa_base.u.
					a64.address);
		buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
		break;
	default:
		pbusy_list = NULL;
		shost_printk(KERN_WARNING, phba->shost,
			     "Unexpected code=%d\n",
			     pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
					   code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
	WARN_ON(list_empty(pbusy_list));
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		WARN_ON(pasync_handle->consumed);
		if (pasync_handle->index == buffer_index)
			break;
	}

	WARN_ON(!pasync_handle);

	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
					     phba->fw_config.iscsi_cid_start;
	pasync_handle->is_header = is_header;
	pasync_handle->buffer_len = ((pdpdu_cqe->
			dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
			& PDUCQE_DPL_MASK) >> 16);

	*pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK);
	return pasync_handle;
}
static unsigned int
hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
			   unsigned int is_header, unsigned int cq_index)
{
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;

	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
		num_entries = pasync_ctx->async_header.num_entries;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
		num_entries = pasync_ctx->async_data.num_entries;
	}

	while ((*pep_read_ptr) != cq_index) {
		(*pep_read_ptr)++;
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	if (!writables) {
		SE_DEBUG(DBG_LVL_1,
			 "Duplicate notification received - index 0x%x!!\n",
			 cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}
static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
				       unsigned int cri)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle, *tmp_handle;
	struct list_head *plist;
	unsigned int i = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	plist = &pasync_ctx->async_entry[cri].wait_queue.list;

	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
		list_del(&pasync_handle->link);

		if (i == 0) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_header.free_list);
			pasync_ctx->async_header.free_entries++;
			i++;
		} else {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_data.free_list);
			pasync_ctx->async_data.free_entries++;
			i++;
		}
	}

	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
	return 0;
}
static struct phys_addr *
hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
		     unsigned int is_header, unsigned int host_write_ptr)
{
	struct phys_addr *pasync_sge = NULL;

	if (is_header)
		pasync_sge = pasync_ctx->async_header.ring_base;
	else
		pasync_sge = pasync_ctx->async_data.ring_base;

	return pasync_sge + host_write_ptr;
}
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
				   unsigned int is_header)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle;
	struct list_head *pfree_link, *pbusy_list;
	struct phys_addr *pasync_sge;
	unsigned int ring_id, num_entries;
	unsigned int host_write_num;
	unsigned int writables;
	unsigned int i = 0;
	u32 doorbell = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	if (is_header) {
		num_entries = pasync_ctx->async_header.num_entries;
		writables = min(pasync_ctx->async_header.writables,
				pasync_ctx->async_header.free_entries);
		pfree_link = pasync_ctx->async_header.free_list.next;
		host_write_num = pasync_ctx->async_header.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_hdr.id;
	} else {
		num_entries = pasync_ctx->async_data.num_entries;
		writables = min(pasync_ctx->async_data.writables,
				pasync_ctx->async_data.free_entries);
		pfree_link = pasync_ctx->async_data.free_list.next;
		host_write_num = pasync_ctx->async_data.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_data.id;
	}

	writables = (writables / 8) * 8;
	if (writables) {
		for (i = 0; i < writables; i++) {
			pbusy_list =
			    hwi_get_async_busy_list(pasync_ctx, is_header,
						    host_write_num);
			pasync_handle =
			    list_entry(pfree_link, struct async_pdu_handle,
								link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 0;

			pfree_link = pfree_link->next;

			pasync_sge = hwi_get_ring_address(pasync_ctx,
						is_header, host_write_num);

			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

			list_move(&pasync_handle->link, pbusy_list);

			host_write_num++;
			host_write_num = host_write_num % num_entries;
		}

		if (is_header) {
			pasync_ctx->async_header.host_write_ptr =
					host_write_num;
			pasync_ctx->async_header.free_entries -= writables;
			pasync_ctx->async_header.writables -= writables;
			pasync_ctx->async_header.busy_entries += writables;
		} else {
			pasync_ctx->async_data.host_write_ptr = host_write_num;
			pasync_ctx->async_data.free_entries -= writables;
			pasync_ctx->async_data.writables -= writables;
			pasync_ctx->async_data.busy_entries += writables;
		}

		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
					<< DB_DEF_PDU_CQPROC_SHIFT;

		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
	}
}
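
/*
 * writables is clamped to a multiple of 8 above: default-PDU buffers are
 * posted to the RXULP0 ring in batches of 8 entries (presumably a hardware
 * posting granularity), so e.g. 13 writable slots post only 8 now and carry
 * the remaining 5 over to the next call.
 */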
static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
					 struct beiscsi_conn *beiscsi_conn,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);
	BUG_ON(pasync_handle->is_header != 0);
	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);

	hwi_free_async_msg(phba, pasync_handle->cri);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}
static unsigned int
hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
		  struct beiscsi_hba *phba,
		  struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
{
	struct list_head *plist;
	struct async_pdu_handle *pasync_handle;
	void *phdr = NULL;
	unsigned int hdr_len = 0, buf_len = 0;
	unsigned int status, index = 0, offset = 0;
	void *pfirst_buffer = NULL;
	unsigned int num_buf = 0;

	plist = &pasync_ctx->async_entry[cri].wait_queue.list;

	list_for_each_entry(pasync_handle, plist, link) {
		if (index == 0) {
			phdr = pasync_handle->pbuffer;
			hdr_len = pasync_handle->buffer_len;
		} else {
			buf_len = pasync_handle->buffer_len;
			if (!num_buf) {
				pfirst_buffer = pasync_handle->pbuffer;
				num_buf++;
			}
			memcpy(pfirst_buffer + offset,
			       pasync_handle->pbuffer, buf_len);
			offset = buf_len;
		}
		index++;
	}

	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
					   (beiscsi_conn->beiscsi_conn_cid -
					    phba->fw_config.iscsi_cid_start),
					    phdr, hdr_len, pfirst_buffer,
					    buf_len);

	if (status == 0)
		hwi_free_async_msg(phba, cri);
	return 0;
}
static unsigned int
hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct beiscsi_hba *phba,
		     struct async_pdu_handle *pasync_handle)
{
	struct hwi_async_pdu_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	unsigned int bytes_needed = 0, status = 0;
	unsigned short cri = pasync_handle->cri;
	struct pdu_base *ppdu;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	list_del(&pasync_handle->link);
	if (pasync_handle->is_header) {
		pasync_ctx->async_header.busy_entries--;
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			hwi_free_async_msg(phba, cri);
			BUG();
		}

		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
				(unsigned short)pasync_handle->buffer_len;
		list_add_tail(&pasync_handle->link,
			      &pasync_ctx->async_entry[cri].wait_queue.list);

		ppdu = pasync_handle->pbuffer;
		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
			0xFFFF0000) | ((be16_to_cpu((ppdu->
			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));

		if (status == 0) {
			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
					bytes_needed;

			if (bytes_needed == 0)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	} else {
		pasync_ctx->async_data.busy_entries--;
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_entry[cri].wait_queue.
				      list);
			pasync_ctx->async_entry[cri].wait_queue.
				bytes_received +=
				(unsigned short)pasync_handle->buffer_len;

			if (pasync_ctx->async_entry[cri].wait_queue.
			    bytes_received >=
			    pasync_ctx->async_entry[cri].wait_queue.
			    bytes_needed)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	}
	return status;
}
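
/*
 * bytes_needed above reassembles the 24-bit DataSegmentLength of the
 * unsolicited PDU from two BHS fields: the high byte (PDUBASE_DATALENHI)
 * is shifted into bits 23..16 and the byte-swapped low word
 * (PDUBASE_DATALENLO) fills bits 15..0. Once every data buffer for this
 * CRI has arrived (bytes_received >= bytes_needed), the complete PDU is
 * forwarded up through hwi_fwd_async_msg().
 */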
static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
					 struct beiscsi_hba *phba,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);

	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);
	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}
static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq;
	struct be_mcc_compl *mcc_compl;
	unsigned int num_processed = 0;

	mcc_cq = &phba->ctrl.mcc_obj.cq;
	mcc_compl = queue_tail_node(mcc_cq);
	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {

		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, mcc_cq->id,
					num_processed, 0, 0);
			num_processed = 0;
		}
		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(mcc_compl->flags))
				/* Interpret compl as a async link evt */
				beiscsi_async_link_state_process(phba,
				   (struct be_async_event_link_state *) mcc_compl);
			else
				SE_DEBUG(DBG_LVL_1,
					 " Unsupported Async Event, flags"
					 " = 0x%08x\n", mcc_compl->flags);
		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
			atomic_dec(&phba->ctrl.mcc_obj.q.used);
		}

		mcc_compl->flags = 0;
		queue_tail_inc(mcc_cq);
		mcc_compl = queue_tail_node(mcc_cq);
		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
		num_processed++;
	}

	if (num_processed > 0)
		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
}
static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
{
	struct be_queue_info *cq;
	struct sol_cqe *sol;
	struct dmsg_cqe *dmsg;
	unsigned int num_processed = 0;
	unsigned int tot_nump = 0;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_endpoint *beiscsi_ep;
	struct iscsi_endpoint *ep;
	struct beiscsi_hba *phba;

	cq = pbe_eq->cq;
	sol = queue_tail_node(cq);
	phba = pbe_eq->phba;

	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
	       CQE_VALID_MASK) {
		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));

		ep = phba->ep_array[(u32) ((sol->
			       dw[offsetof(struct amap_sol_cqe, cid) / 32] &
			       SOL_CID_MASK) >> 6) -
			       phba->fw_config.iscsi_cid_start];

		beiscsi_ep = ep->dd_data;
		beiscsi_conn = beiscsi_ep->conn;

		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, cq->id,
					num_processed, 0, 0);
			tot_nump += num_processed;
			num_processed = 0;
		}

		switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
			32] & CQE_CODE_MASK) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
			dmsg = (struct dmsg_cqe *)sol;
			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
						(struct i_t_dpdu_cqe *)sol);
			break;
		case UNSOL_DATA_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
						(struct i_t_dpdu_cqe *)sol);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Ignoring CQ Error notification for cmd/cxn "
				 "invalidate\n");
			break;
		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
		case CMD_KILLED_INVALID_STATSN_RCVD:
		case CMD_KILLED_INVALID_R2T_RCVD:
		case CMD_CXN_KILLED_LUN_INVALID:
		case CMD_CXN_KILLED_ICD_INVALID:
		case CMD_CXN_KILLED_ITT_INVALID:
		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
			SE_DEBUG(DBG_LVL_1,
				 "CQ Error notification for cmd.. "
				 "code %d cid 0x%x\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & SOL_CID_MASK));
			break;
		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Digest error on def pdu ring, dropping..\n");
			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
						(struct i_t_dpdu_cqe *) sol);
			break;
		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
		case CXN_KILLED_BURST_LEN_MISMATCH:
		case CXN_KILLED_AHS_RCVD:
		case CXN_KILLED_HDR_DIGEST_ERR:
		case CXN_KILLED_UNKNOWN_HDR:
		case CXN_KILLED_STALE_ITT_TTT_RCVD:
		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
		case CXN_KILLED_TIMED_OUT:
		case CXN_KILLED_FIN_RCVD:
		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
		case CXN_KILLED_OVER_RUN_RESIDUAL:
		case CXN_KILLED_UNDER_RUN_RESIDUAL:
		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
				 "0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		case CXN_KILLED_RST_SENT:
		case CXN_KILLED_RST_RCVD:
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
				 "received/sent on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		default:
			SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
				 "received on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			break;
		}

		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
		queue_tail_inc(cq);
		sol = queue_tail_node(cq);
		num_processed++;
	}

	if (num_processed > 0) {
		tot_nump += num_processed;
		hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
	}
	return tot_nump;
}
void beiscsi_process_all_cqs(struct work_struct *work)
{
	unsigned long flags;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba =
	    container_of(work, struct beiscsi_hba, work_cqs);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	if (phba->msix_enabled)
		pbe_eq = &phwi_context->be_eq[phba->num_cpus];
	else
		pbe_eq = &phwi_context->be_eq[0];

	if (phba->todo_mcc_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_mcc_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_mcc_isr(phba);
	}

	if (phba->todo_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_cq(pbe_eq);
	}
}
static int be_iopoll(struct blk_iopoll *iop, int budget)
{
	unsigned int ret;
	struct beiscsi_hba *phba;
	struct be_eq_obj *pbe_eq;

	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
	ret = beiscsi_process_cq(pbe_eq);
	if (ret < budget) {
		phba = pbe_eq->phba;
		blk_iopoll_complete(iop);
		SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
	}
	return ret;
}
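
/*
 * be_iopoll() is the blk_iopoll (softirq) half of interrupt mitigation:
 * be_iopoll_budget (module parameter, default 10) arrives here as budget.
 * Draining fewer CQEs than the budget means the queue is quiet, so the
 * poller completes and rearms the EQ through hwi_ring_eq_db(); otherwise
 * the blk_iopoll core keeps this instance scheduled without touching the
 * doorbell.
 */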
static void
hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
	      unsigned int num_sg, struct beiscsi_io_task *io_task)
{
	struct iscsi_sge *psgl;
	unsigned short sg_len, index;
	unsigned int sge_len = 0;
	unsigned long long addr;
	struct scatterlist *l_sg;
	unsigned int offset;

	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	l_sg = sg;
	for (index = 0; (index < num_sg) && (index < 2); index++,
						sg = sg_next(sg)) {
		if (index == 0) {
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
					      (addr & 0xFFFFFFFF));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
					      (addr >> 32));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
					      sg_len);
			sge_len = sg_len;
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
				      pwrb, sge_len);
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
					      (addr & 0xFFFFFFFF));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
					      (addr >> 32));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
					      sg_len);
		}
	}
	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);

	if (num_sg == 1) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 0);
	} else if (num_sg == 2) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 0);
	}
	sg = l_sg;
	psgl++;
	psgl++;
	offset = 0;
	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			      (addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			      (addr >> 32));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
		offset += sg_len;
	}
	psgl--;
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
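
/*
 * Layout written by hwi_write_sgl(): the first two data fragments are
 * inlined in the WRB itself (sge0/sge1, with sge1_r2t_offset carrying the
 * running offset), while the full scatterlist is mirrored into the
 * iscsi_sge fragment table at psgl. Entry 0 of that table always describes
 * the BHS, the table is then walked two entries in, and only the final
 * entry gets last_sge set.
 */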
static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
{
	struct iscsi_sge *psgl;
	unsigned long long addr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct beiscsi_conn *beiscsi_conn = io_task->conn;
	struct beiscsi_hba *phba = beiscsi_conn->phba;

	io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	if (task->data) {
		if (task->data_count) {
			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
			addr = (u64) pci_map_single(phba->pcidev,
						    task->data,
						    task->data_count, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
			addr = 0;
		}
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
			      (addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
			      (addr >> 32));
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
			      task->data_count);

		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
		addr = 0;
	}

	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);
	if (task->data) {
		psgl++;
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
	}
	psgl++;
	if (task->data) {
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			      (addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			      (addr >> 32));
	}
	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);

	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
1791 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1793 unsigned int num_cq_pages, num_async_pdu_buf_pages;
1794 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1795 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1797 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1798 sizeof(struct sol_cqe));
1799 num_async_pdu_buf_pages =
1800 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1801 phba->params.defpdu_hdr_sz);
1802 num_async_pdu_buf_sgl_pages =
1803 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1804 sizeof(struct phys_addr));
1805 num_async_pdu_data_pages =
1806 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1807 phba->params.defpdu_data_sz);
1808 num_async_pdu_data_sgl_pages =
1809 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1810 sizeof(struct phys_addr));
1812 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1814 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1815 BE_ISCSI_PDU_HEADER_SIZE;
1816 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1817 sizeof(struct hwi_context_memory);
1820 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1821 * (phba->params.wrbs_per_cxn)
1822 * phba->params.cxns_per_ctrl;
1823 wrb_sz_per_cxn = sizeof(struct wrb_handle) *
1824 (phba->params.wrbs_per_cxn);
1825 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1826 phba->params.cxns_per_ctrl);
1828 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1829 phba->params.icds_per_ctrl;
1830 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1831 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1833 phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1834 num_async_pdu_buf_pages * PAGE_SIZE;
1835 phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1836 num_async_pdu_data_pages * PAGE_SIZE;
1837 phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1838 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1839 phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1840 num_async_pdu_data_sgl_pages * PAGE_SIZE;
1841 phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1842 phba->params.asyncpdus_per_ctrl *
1843 sizeof(struct async_pdu_handle);
1844 phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1845 phba->params.asyncpdus_per_ctrl *
1846 sizeof(struct async_pdu_handle);
1847 phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1848 sizeof(struct hwi_async_pdu_context) +
1849 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
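/*
 * beiscsi_alloc_mem - back every mem_req[] entry with DMA memory.
 * Each region is allocated as up to BEISCSI_MAX_FRAGS_INIT coherent
 * fragments of at most be_max_phys_size KB. When an allocation fails,
 * the chunk size is first rounded down to a power of two, then halved
 * on subsequent retries; anything at or below BE_MIN_MEM_SIZE aborts
 * with a full unwind of everything allocated so far.
 */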
1852 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
1854 struct be_mem_descriptor *mem_descr;
1855 dma_addr_t bus_add;
1856 struct mem_array *mem_arr, *mem_arr_orig;
1857 unsigned int i, j, alloc_size, curr_alloc_size;
1859 phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
1860 if (!phba->phwi_ctrlr)
1861 return -ENOMEM;
1863 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
1864 GFP_KERNEL);
1865 if (!phba->init_mem) {
1866 kfree(phba->phwi_ctrlr);
1867 return -ENOMEM;
1870 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
1871 GFP_KERNEL);
1872 if (!mem_arr_orig) {
1873 kfree(phba->init_mem);
1874 kfree(phba->phwi_ctrlr);
1875 return -ENOMEM;
1878 mem_descr = phba->init_mem;
1879 for (i = 0; i < SE_MEM_MAX; i++) {
1880 j = 0;
1881 mem_arr = mem_arr_orig;
1882 alloc_size = phba->mem_req[i];
1883 memset(mem_arr, 0, sizeof(struct mem_array) *
1884 BEISCSI_MAX_FRAGS_INIT);
1885 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
1886 do {
1887 mem_arr->virtual_address = pci_alloc_consistent(
1888 phba->pcidev,
1889 curr_alloc_size,
1890 &bus_add);
1891 if (!mem_arr->virtual_address) {
1892 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
1893 goto free_mem;
1894 if (curr_alloc_size -
1895 rounddown_pow_of_two(curr_alloc_size))
1896 curr_alloc_size = rounddown_pow_of_two
1897 (curr_alloc_size);
1898 else
1899 curr_alloc_size = curr_alloc_size / 2;
1900 } else {
1901 mem_arr->bus_address.u.
1902 a64.address = (__u64) bus_add;
1903 mem_arr->size = curr_alloc_size;
1904 alloc_size -= curr_alloc_size;
1905 curr_alloc_size = min(be_max_phys_size *
1906 1024, alloc_size);
1907 j++;
1908 mem_arr++;
1910 } while (alloc_size);
1911 mem_descr->num_elements = j;
1912 mem_descr->size_in_bytes = phba->mem_req[i];
1913 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
1914 GFP_KERNEL);
1915 if (!mem_descr->mem_array)
1916 goto free_mem;
1918 memcpy(mem_descr->mem_array, mem_arr_orig,
1919 sizeof(struct mem_array) * j);
1920 mem_descr++;
1922 kfree(mem_arr_orig);
1923 return 0;
1924 free_mem:
1925 mem_descr->num_elements = j;
1926 while ((i) || (j)) {
1927 for (j = mem_descr->num_elements; j > 0; j--) {
1928 pci_free_consistent(phba->pcidev,
1929 mem_descr->mem_array[j - 1].size,
1930 mem_descr->mem_array[j - 1].
1931 virtual_address,
1932 mem_descr->mem_array[j - 1].
1933 bus_address.u.a64.address);
1935 if (i) {
1936 i--;
1937 kfree(mem_descr->mem_array);
1938 mem_descr--;
1941 kfree(mem_arr_orig);
1942 kfree(phba->init_mem);
1943 kfree(phba->phwi_ctrlr);
1944 return -ENOMEM;
1947 static int beiscsi_get_memory(struct beiscsi_hba *phba)
1949 beiscsi_find_mem_req(phba);
1950 return beiscsi_alloc_mem(phba);
1953 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
1955 struct pdu_data_out *pdata_out;
1956 struct pdu_nop_out *pnop_out;
1957 struct be_mem_descriptor *mem_descr;
1959 mem_descr = phba->init_mem;
1960 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
1961 pdata_out =
1962 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
1963 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1965 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
1966 IIOC_SCSI_DATA);
1968 pnop_out =
1969 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
1970 virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
1972 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1973 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
1974 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
1975 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
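/*
 * beiscsi_init_wrb_handle - carve the HWI_MEM_WRBH and HWI_MEM_WRB
 * regions into per-connection WRB-handle and WRB arrays. The first
 * pass gives each even-indexed wrb_context wrbs_per_cxn handles,
 * moving to the next mem_array fragment when the current one runs
 * out; the second pass wires each handle to its iscsi_wrb.
 */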
1978 static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1980 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
1981 struct wrb_handle *pwrb_handle;
1982 struct hwi_controller *phwi_ctrlr;
1983 struct hwi_wrb_context *pwrb_context;
1984 struct iscsi_wrb *pwrb;
1985 unsigned int num_cxn_wrbh;
1986 unsigned int num_cxn_wrb, j, idx, index;
1988 mem_descr_wrbh = phba->init_mem;
1989 mem_descr_wrbh += HWI_MEM_WRBH;
1991 mem_descr_wrb = phba->init_mem;
1992 mem_descr_wrb += HWI_MEM_WRB;
1994 idx = 0;
1995 pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
1996 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
1997 ((sizeof(struct wrb_handle)) *
1998 phba->params.wrbs_per_cxn));
1999 phwi_ctrlr = phba->phwi_ctrlr;
2001 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2002 pwrb_context = &phwi_ctrlr->wrb_context[index];
2003 pwrb_context->pwrb_handle_base =
2004 kzalloc(sizeof(struct wrb_handle *) *
2005 phba->params.wrbs_per_cxn, GFP_KERNEL);
2006 pwrb_context->pwrb_handle_basestd =
2007 kzalloc(sizeof(struct wrb_handle *) *
2008 phba->params.wrbs_per_cxn, GFP_KERNEL);
2009 if (num_cxn_wrbh) {
2010 pwrb_context->alloc_index = 0;
2011 pwrb_context->wrb_handles_available = 0;
2012 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2013 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2014 pwrb_context->pwrb_handle_basestd[j] =
2015 pwrb_handle;
2016 pwrb_context->wrb_handles_available++;
2017 pwrb_handle->wrb_index = j;
2018 pwrb_handle++;
2020 pwrb_context->free_index = 0;
2021 num_cxn_wrbh--;
2022 } else {
2023 idx++;
2024 pwrb_handle =
2025 mem_descr_wrbh->mem_array[idx].virtual_address;
2026 num_cxn_wrbh =
2027 ((mem_descr_wrbh->mem_array[idx].size) /
2028 ((sizeof(struct wrb_handle)) *
2029 phba->params.wrbs_per_cxn));
2030 pwrb_context->alloc_index = 0;
pwrb_context->wrb_handles_available = 0; /* not zeroed by kmalloc */
2031 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2032 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2033 pwrb_context->pwrb_handle_basestd[j] =
2034 pwrb_handle;
2035 pwrb_context->wrb_handles_available++;
2036 pwrb_handle->wrb_index = j;
2037 pwrb_handle++;
2039 pwrb_context->free_index = 0;
2040 num_cxn_wrbh--;
2043 idx = 0;
2044 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2045 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2046 ((sizeof(struct iscsi_wrb) *
2047 phba->params.wrbs_per_cxn));
2048 for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) {
2049 pwrb_context = &phwi_ctrlr->wrb_context[index];
2050 if (num_cxn_wrb) {
2051 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2052 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2053 pwrb_handle->pwrb = pwrb;
2054 pwrb++;
2056 num_cxn_wrb--;
2057 } else {
2058 idx++;
2059 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2060 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2061 ((sizeof(struct iscsi_wrb) *
2062 phba->params.wrbs_per_cxn));
2063 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2064 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2065 pwrb_handle->pwrb = pwrb;
2066 pwrb++;
2068 num_cxn_wrb--;
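/*
 * hwi_init_async_pdu_ctx - lay out the default-PDU bookkeeping. The
 * HWI_MEM_ASYNC_* regions provide the header/data buffers, rings and
 * handle tables; every handle is stamped with the virtual and bus
 * address of its buffer and queued on the matching free_list so that
 * unsolicited PDU buffers can later be posted to the adapter.
 */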
2073 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2075 struct hwi_controller *phwi_ctrlr;
2076 struct hba_parameters *p = &phba->params;
2077 struct hwi_async_pdu_context *pasync_ctx;
2078 struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2079 unsigned int index;
2080 struct be_mem_descriptor *mem_descr;
2082 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2083 mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2085 phwi_ctrlr = phba->phwi_ctrlr;
2086 phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2087 mem_descr->mem_array[0].virtual_address;
2088 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2089 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2091 pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2092 pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2093 pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2094 pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2096 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2097 mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2098 if (mem_descr->mem_array[0].virtual_address) {
2099 SE_DEBUG(DBG_LVL_8,
2100 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
2101 "va=%p \n", mem_descr->mem_array[0].virtual_address);
2102 } else
2103 shost_printk(KERN_WARNING, phba->shost,
2104 "No Virtual address \n");
2106 pasync_ctx->async_header.va_base =
2107 mem_descr->mem_array[0].virtual_address;
2109 pasync_ctx->async_header.pa_base.u.a64.address =
2110 mem_descr->mem_array[0].bus_address.u.a64.address;
2112 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2113 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2114 if (mem_descr->mem_array[0].virtual_address) {
2115 SE_DEBUG(DBG_LVL_8,
2116 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
2117 "va=%p \n", mem_descr->mem_array[0].virtual_address);
2118 } else
2119 shost_printk(KERN_WARNING, phba->shost,
2120 "No Virtual address \n");
2121 pasync_ctx->async_header.ring_base =
2122 mem_descr->mem_array[0].virtual_address;
2124 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2125 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2126 if (mem_descr->mem_array[0].virtual_address) {
2127 SE_DEBUG(DBG_LVL_8,
2128 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
2129 "va=%p \n", mem_descr->mem_array[0].virtual_address);
2130 } else
2131 shost_printk(KERN_WARNING, phba->shost,
2132 "No Virtual address \n");
2134 pasync_ctx->async_header.handle_base =
2135 mem_descr->mem_array[0].virtual_address;
2136 pasync_ctx->async_header.writables = 0;
2137 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2139 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2140 mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2141 if (mem_descr->mem_array[0].virtual_address) {
2142 SE_DEBUG(DBG_LVL_8,
2143 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
2144 "va=%p \n", mem_descr->mem_array[0].virtual_address);
2145 } else
2146 shost_printk(KERN_WARNING, phba->shost,
2147 "No Virtual address \n");
2148 pasync_ctx->async_data.va_base =
2149 mem_descr->mem_array[0].virtual_address;
2150 pasync_ctx->async_data.pa_base.u.a64.address =
2151 mem_descr->mem_array[0].bus_address.u.a64.address;
2153 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2154 mem_descr += HWI_MEM_ASYNC_DATA_RING;
2155 if (mem_descr->mem_array[0].virtual_address) {
2156 SE_DEBUG(DBG_LVL_8,
2157 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
2158 "va=%p \n", mem_descr->mem_array[0].virtual_address);
2159 } else
2160 shost_printk(KERN_WARNING, phba->shost,
2161 "No Virtual address \n");
2163 pasync_ctx->async_data.ring_base =
2164 mem_descr->mem_array[0].virtual_address;
2166 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2167 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2168 if (!mem_descr->mem_array[0].virtual_address)
2169 shost_printk(KERN_WARNING, phba->shost,
2170 "No Virtual address \n");
2172 pasync_ctx->async_data.handle_base =
2173 mem_descr->mem_array[0].virtual_address;
2174 pasync_ctx->async_data.writables = 0;
2175 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2177 pasync_header_h =
2178 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2179 pasync_data_h =
2180 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2182 for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2183 pasync_header_h->cri = -1;
2184 pasync_header_h->index = (char)index;
2185 INIT_LIST_HEAD(&pasync_header_h->link);
2186 pasync_header_h->pbuffer =
2187 (void *)((unsigned long)
2188 (pasync_ctx->async_header.va_base) +
2189 (p->defpdu_hdr_sz * index));
2191 pasync_header_h->pa.u.a64.address =
2192 pasync_ctx->async_header.pa_base.u.a64.address +
2193 (p->defpdu_hdr_sz * index);
2195 list_add_tail(&pasync_header_h->link,
2196 &pasync_ctx->async_header.free_list);
2197 pasync_header_h++;
2198 pasync_ctx->async_header.free_entries++;
2199 pasync_ctx->async_header.writables++;
2201 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2202 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2203 header_busy_list);
2204 pasync_data_h->cri = -1;
2205 pasync_data_h->index = (char)index;
2206 INIT_LIST_HEAD(&pasync_data_h->link);
2207 pasync_data_h->pbuffer =
2208 (void *)((unsigned long)
2209 (pasync_ctx->async_data.va_base) +
2210 (p->defpdu_data_sz * index));
2212 pasync_data_h->pa.u.a64.address =
2213 pasync_ctx->async_data.pa_base.u.a64.address +
2214 (p->defpdu_data_sz * index);
2216 list_add_tail(&pasync_data_h->link,
2217 &pasync_ctx->async_data.free_list);
2218 pasync_data_h++;
2219 pasync_ctx->async_data.free_entries++;
2220 pasync_ctx->async_data.writables++;
2222 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2225 pasync_ctx->async_header.host_write_ptr = 0;
2226 pasync_ctx->async_header.ep_read_ptr = -1;
2227 pasync_ctx->async_data.host_write_ptr = 0;
2228 pasync_ctx->async_data.ep_read_ptr = -1;
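/*
 * Small helpers that describe one physically contiguous region as a
 * be_dma_mem "SGL" for the mailbox commands below. Note that the
 * by-offset variant currently mirrors hwi_build_be_sgl_arr apart
 * from a cast.
 */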
2231 static int
2232 be_sgl_create_contiguous(void *virtual_address,
2233 u64 physical_address, u32 length,
2234 struct be_dma_mem *sgl)
2236 WARN_ON(!virtual_address);
2237 WARN_ON(!physical_address);
2238 WARN_ON(length == 0);
2239 WARN_ON(!sgl);
2241 sgl->va = virtual_address;
2242 sgl->dma = physical_address;
2243 sgl->size = length;
2245 return 0;
2248 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2250 memset(sgl, 0, sizeof(*sgl));
2253 static void
2254 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2255 struct mem_array *pmem, struct be_dma_mem *sgl)
2257 if (sgl->va)
2258 be_sgl_destroy_contiguous(sgl);
2260 be_sgl_create_contiguous(pmem->virtual_address,
2261 pmem->bus_address.u.a64.address,
2262 pmem->size, sgl);
2265 static void
2266 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2267 struct mem_array *pmem, struct be_dma_mem *sgl)
2269 if (sgl->va)
2270 be_sgl_destroy_contiguous(sgl);
2272 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2273 pmem->bus_address.u.a64.address,
2274 pmem->size, sgl);
2277 static int be_fill_queue(struct be_queue_info *q,
2278 u16 len, u16 entry_size, void *vaddress)
2280 struct be_dma_mem *mem = &q->dma_mem;
2282 memset(q, 0, sizeof(*q));
2283 q->len = len;
2284 q->entry_size = entry_size;
2285 mem->size = len * entry_size;
2286 mem->va = vaddress;
2287 if (!mem->va)
2288 return -ENOMEM;
2289 memset(mem->va, 0, mem->size);
2290 return 0;
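/*
 * beiscsi_create_eqs - allocate and create one event queue per CPU,
 * plus one for the MCC when MSI-X is enabled. Queue memory comes from
 * pci_alloc_consistent(); on any failure every EQ allocated so far is
 * freed again before returning.
 */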
2293 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2294 struct hwi_context_memory *phwi_context)
2296 unsigned int i, num_eq_pages;
2297 int ret, eq_for_mcc;
2298 struct be_queue_info *eq;
2299 struct be_dma_mem *mem;
2300 void *eq_vaddress;
2301 dma_addr_t paddr;
2303 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2304 sizeof(struct be_eq_entry));
2306 if (phba->msix_enabled)
2307 eq_for_mcc = 1;
2308 else
2309 eq_for_mcc = 0;
2310 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2311 eq = &phwi_context->be_eq[i].q;
2312 mem = &eq->dma_mem;
2313 phwi_context->be_eq[i].phba = phba;
2314 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2315 num_eq_pages * PAGE_SIZE,
2316 &paddr);
2317 if (!eq_vaddress)
2318 goto create_eq_error;
2320 mem->va = eq_vaddress;
2321 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2322 sizeof(struct be_eq_entry), eq_vaddress);
2323 if (ret) {
2324 shost_printk(KERN_ERR, phba->shost,
2325 "be_fill_queue Failed for EQ \n");
2326 goto create_eq_error;
2329 mem->dma = paddr;
2330 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2331 phwi_context->cur_eqd);
2332 if (ret) {
2333 shost_printk(KERN_ERR, phba->shost,
2334 "beiscsi_cmd_eq_create"
2335 "Failedfor EQ \n");
2336 goto create_eq_error;
2338 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2340 return 0;
2341 create_eq_error:
2342 for (i = 0; i < (phba->num_cpus + 1); i++) {
2343 eq = &phwi_context->be_eq[i].q;
2344 mem = &eq->dma_mem;
2345 if (mem->va)
2346 pci_free_consistent(phba->pcidev, num_eq_pages
2347 * PAGE_SIZE,
2348 mem->va, mem->dma);
2350 return ret;
2353 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2354 struct hwi_context_memory *phwi_context)
2356 unsigned int i, num_cq_pages;
2357 int ret;
2358 struct be_queue_info *cq, *eq;
2359 struct be_dma_mem *mem;
2360 struct be_eq_obj *pbe_eq;
2361 void *cq_vaddress;
2362 dma_addr_t paddr;
2364 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2365 sizeof(struct sol_cqe));
2367 for (i = 0; i < phba->num_cpus; i++) {
2368 cq = &phwi_context->be_cq[i];
2369 eq = &phwi_context->be_eq[i].q;
2370 pbe_eq = &phwi_context->be_eq[i];
2371 pbe_eq->cq = cq;
2372 pbe_eq->phba = phba;
2373 mem = &cq->dma_mem;
2374 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2375 num_cq_pages * PAGE_SIZE,
2376 &paddr);
2377 if (!cq_vaddress)
2378 goto create_cq_error;
2379 ret = be_fill_queue(cq, phba->params.num_cq_entries,
2380 sizeof(struct sol_cqe), cq_vaddress);
2381 if (ret) {
2382 shost_printk(KERN_ERR, phba->shost,
2383 "be_fill_queue Failed for ISCSI CQ \n");
2384 goto create_cq_error;
2387 mem->dma = paddr;
2388 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2389 false, 0);
2390 if (ret) {
2391 shost_printk(KERN_ERR, phba->shost,
2392 "beiscsi_cmd_eq_create"
2393 "Failed for ISCSI CQ \n");
2394 goto create_cq_error;
2396 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2397 cq->id, eq->id);
2398 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2400 return 0;
2402 create_cq_error:
2403 for (i = 0; i < phba->num_cpus; i++) {
2404 cq = &phwi_context->be_cq[i];
2405 mem = &cq->dma_mem;
2406 if (mem->va)
2407 pci_free_consistent(phba->pcidev, num_cq_pages
2408 * PAGE_SIZE,
2409 mem->va, mem->dma);
2411 return ret;
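/*
 * beiscsi_create_def_hdr - build the default PDU header ring from the
 * HWI_MEM_ASYNC_HEADER_RING region, hand it to the adapter via
 * be_cmd_create_default_pdu_queue() and prime it with async buffers.
 * beiscsi_create_def_data below does the same for the data ring.
 */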
2415 static int
2416 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2417 struct hwi_context_memory *phwi_context,
2418 struct hwi_controller *phwi_ctrlr,
2419 unsigned int def_pdu_ring_sz)
2421 unsigned int idx;
2422 int ret;
2423 struct be_queue_info *dq, *cq;
2424 struct be_dma_mem *mem;
2425 struct be_mem_descriptor *mem_descr;
2426 void *dq_vaddress;
2428 idx = 0;
2429 dq = &phwi_context->be_def_hdrq;
2430 cq = &phwi_context->be_cq[0];
2431 mem = &dq->dma_mem;
2432 mem_descr = phba->init_mem;
2433 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2434 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2435 ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2436 sizeof(struct phys_addr),
2437 sizeof(struct phys_addr), dq_vaddress);
2438 if (ret) {
2439 shost_printk(KERN_ERR, phba->shost,
2440 "be_fill_queue Failed for DEF PDU HDR\n");
2441 return ret;
2443 mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2444 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2445 def_pdu_ring_sz,
2446 phba->params.defpdu_hdr_sz);
2447 if (ret) {
2448 shost_printk(KERN_ERR, phba->shost,
2449 "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2450 return ret;
2452 phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2453 SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2454 phwi_context->be_def_hdrq.id);
2455 hwi_post_async_buffers(phba, 1);
2456 return 0;
2459 static int
2460 beiscsi_create_def_data(struct beiscsi_hba *phba,
2461 struct hwi_context_memory *phwi_context,
2462 struct hwi_controller *phwi_ctrlr,
2463 unsigned int def_pdu_ring_sz)
2465 unsigned int idx;
2466 int ret;
2467 struct be_queue_info *dataq, *cq;
2468 struct be_dma_mem *mem;
2469 struct be_mem_descriptor *mem_descr;
2470 void *dq_vaddress;
2472 idx = 0;
2473 dataq = &phwi_context->be_def_dataq;
2474 cq = &phwi_context->be_cq[0];
2475 mem = &dataq->dma_mem;
2476 mem_descr = phba->init_mem;
2477 mem_descr += HWI_MEM_ASYNC_DATA_RING;
2478 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2479 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2480 sizeof(struct phys_addr),
2481 sizeof(struct phys_addr), dq_vaddress);
2482 if (ret) {
2483 shost_printk(KERN_ERR, phba->shost,
2484 "be_fill_queue Failed for DEF PDU DATA\n");
2485 return ret;
2487 mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2488 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2489 def_pdu_ring_sz,
2490 phba->params.defpdu_data_sz);
2491 if (ret) {
2492 shost_printk(KERN_ERR, phba->shost,
2493 "be_cmd_create_default_pdu_queue Failed"
2494 " for DEF PDU DATA\n");
2495 return ret;
2497 phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2498 SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2499 phwi_context->be_def_dataq.id);
2500 hwi_post_async_buffers(phba, 0);
2501 SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
2502 return 0;
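/*
 * beiscsi_post_pages - register the HWI_MEM_SGE region with the
 * adapter, fragment by fragment, starting at the page offset that
 * corresponds to fw_config.iscsi_icd_start.
 */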
2505 static int
2506 beiscsi_post_pages(struct beiscsi_hba *phba)
2508 struct be_mem_descriptor *mem_descr;
2509 struct mem_array *pm_arr;
2510 unsigned int page_offset, i;
2511 struct be_dma_mem sgl;
2512 int status;
2514 mem_descr = phba->init_mem;
2515 mem_descr += HWI_MEM_SGE;
2516 pm_arr = mem_descr->mem_array;
2518 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2519 phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2520 for (i = 0; i < mem_descr->num_elements; i++) {
2521 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2522 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2523 page_offset,
2524 (pm_arr->size / PAGE_SIZE));
2525 page_offset += pm_arr->size / PAGE_SIZE;
2526 if (status != 0) {
2527 shost_printk(KERN_ERR, phba->shost,
2528 "post sgl failed.\n");
2529 return status;
2531 pm_arr++;
2533 SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
2534 return 0;
2537 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2539 struct be_dma_mem *mem = &q->dma_mem;
2540 if (mem->va)
2541 pci_free_consistent(phba->pcidev, mem->size,
2542 mem->va, mem->dma);
2545 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2546 u16 len, u16 entry_size)
2548 struct be_dma_mem *mem = &q->dma_mem;
2550 memset(q, 0, sizeof(*q));
2551 q->len = len;
2552 q->entry_size = entry_size;
2553 mem->size = len * entry_size;
2554 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2555 if (!mem->va)
2556 return -ENOMEM;
2557 memset(mem->va, 0, mem->size);
2558 return 0;
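/*
 * beiscsi_create_wrb_rings - split the HWI_MEM_WRB region into one
 * ring of wrbs_per_cxn WRBs per connection and issue
 * be_cmd_wrbq_create() for each; the returned queue id becomes the
 * cid of the matching (even-indexed) wrb_context.
 */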
2561 static int
2562 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2563 struct hwi_context_memory *phwi_context,
2564 struct hwi_controller *phwi_ctrlr)
2566 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2567 u64 pa_addr_lo;
2568 unsigned int idx, num, i;
2569 struct mem_array *pwrb_arr;
2570 void *wrb_vaddr;
2571 struct be_dma_mem sgl;
2572 struct be_mem_descriptor *mem_descr;
2573 int status;
2575 idx = 0;
2576 mem_descr = phba->init_mem;
2577 mem_descr += HWI_MEM_WRB;
2578 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2579 GFP_KERNEL);
2580 if (!pwrb_arr) {
2581 shost_printk(KERN_ERR, phba->shost,
2582 "Memory alloc failed in create wrb ring.\n");
2583 return -ENOMEM;
2585 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2586 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2587 num_wrb_rings = mem_descr->mem_array[idx].size /
2588 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2590 for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2591 if (num_wrb_rings) {
2592 pwrb_arr[num].virtual_address = wrb_vaddr;
2593 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2594 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2595 sizeof(struct iscsi_wrb);
2596 wrb_vaddr += pwrb_arr[num].size;
2597 pa_addr_lo += pwrb_arr[num].size;
2598 num_wrb_rings--;
2599 } else {
2600 idx++;
2601 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2602 pa_addr_lo = mem_descr->mem_array[idx].\
2603 bus_address.u.a64.address;
2604 num_wrb_rings = mem_descr->mem_array[idx].size /
2605 (phba->params.wrbs_per_cxn *
2606 sizeof(struct iscsi_wrb));
2607 pwrb_arr[num].virtual_address = wrb_vaddr;
2608 pwrb_arr[num].bus_address.u.a64.address\
2609 = pa_addr_lo;
2610 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2611 sizeof(struct iscsi_wrb);
2612 wrb_vaddr += pwrb_arr[num].size;
2613 pa_addr_lo += pwrb_arr[num].size;
2614 num_wrb_rings--;
2617 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2618 wrb_mem_index = 0;
2619 offset = 0;
2620 size = 0;
2622 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2623 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2624 &phwi_context->be_wrbq[i]);
2625 if (status != 0) {
2626 shost_printk(KERN_ERR, phba->shost,
2627 "wrbq create failed.");
2628 return status;
2630 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].id;
2633 kfree(pwrb_arr);
2634 return 0;
2637 static void free_wrb_handles(struct beiscsi_hba *phba)
2639 unsigned int index;
2640 struct hwi_controller *phwi_ctrlr;
2641 struct hwi_wrb_context *pwrb_context;
2643 phwi_ctrlr = phba->phwi_ctrlr;
2644 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2645 pwrb_context = &phwi_ctrlr->wrb_context[index];
2646 kfree(pwrb_context->pwrb_handle_base);
2647 kfree(pwrb_context->pwrb_handle_basestd);
2651 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2653 struct be_queue_info *q;
2654 struct be_ctrl_info *ctrl = &phba->ctrl;
2656 q = &phba->ctrl.mcc_obj.q;
2657 if (q->created)
2658 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2659 be_queue_free(phba, q);
2661 q = &phba->ctrl.mcc_obj.cq;
2662 if (q->created)
2663 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2664 be_queue_free(phba, q);
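/*
 * hwi_cleanup - destroy everything hwi_init_port() created: WRB
 * queues, default PDU queues, the SGL mapping, completion and event
 * queues, and finally the MCC queues.
 */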
2667 static void hwi_cleanup(struct beiscsi_hba *phba)
2669 struct be_queue_info *q;
2670 struct be_ctrl_info *ctrl = &phba->ctrl;
2671 struct hwi_controller *phwi_ctrlr;
2672 struct hwi_context_memory *phwi_context;
2673 int i, eq_num;
2675 phwi_ctrlr = phba->phwi_ctrlr;
2676 phwi_context = phwi_ctrlr->phwi_ctxt;
2677 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2678 q = &phwi_context->be_wrbq[i];
2679 if (q->created)
2680 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2682 free_wrb_handles(phba);
2684 q = &phwi_context->be_def_hdrq;
2685 if (q->created)
2686 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2688 q = &phwi_context->be_def_dataq;
2689 if (q->created)
2690 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2692 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2694 for (i = 0; i < (phba->num_cpus); i++) {
2695 q = &phwi_context->be_cq[i];
2696 if (q->created)
2697 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2699 if (phba->msix_enabled)
2700 eq_num = 1;
2701 else
2702 eq_num = 0;
2703 for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2704 q = &phwi_context->be_eq[i].q;
2705 if (q->created)
2706 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2708 be_mcc_queues_destroy(phba);
2711 static int be_mcc_queues_create(struct beiscsi_hba *phba,
2712 struct hwi_context_memory *phwi_context)
2714 struct be_queue_info *q, *cq;
2715 struct be_ctrl_info *ctrl = &phba->ctrl;
2717 /* Alloc MCC compl queue */
2718 cq = &phba->ctrl.mcc_obj.cq;
2719 if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2720 sizeof(struct be_mcc_compl)))
2721 goto err;
2722 /* Ask BE to create MCC compl queue; */
2723 if (phba->msix_enabled) {
2724 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2725 [phba->num_cpus].q, false, true, 0))
2726 goto mcc_cq_free;
2727 } else {
2728 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2729 false, true, 0))
2730 goto mcc_cq_free;
2733 /* Alloc MCC queue */
2734 q = &phba->ctrl.mcc_obj.q;
2735 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2736 goto mcc_cq_destroy;
2738 /* Ask BE to create MCC queue */
2739 if (beiscsi_cmd_mccq_create(phba, q, cq))
2740 goto mcc_q_free;
2742 return 0;
2744 mcc_q_free:
2745 be_queue_free(phba, q);
2746 mcc_cq_destroy:
2747 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2748 mcc_cq_free:
2749 be_queue_free(phba, cq);
2750 err:
2751 return -ENOMEM;
2754 static int find_num_cpus(void)
2756 int num_cpus = 0;
2758 num_cpus = num_online_cpus();
2759 if (num_cpus >= MAX_CPUS)
2760 num_cpus = MAX_CPUS - 1;
2762 SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
2763 return num_cpus;
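/*
 * hwi_init_port - bring the adapter interface up in dependency order:
 * firmware init, EQs, MCC queues, firmware version check, CQs,
 * default PDU header/data rings, SGL page posting and WRB rings.
 * Any failure tears the partially built state down via hwi_cleanup().
 */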
2766 static int hwi_init_port(struct beiscsi_hba *phba)
2768 struct hwi_controller *phwi_ctrlr;
2769 struct hwi_context_memory *phwi_context;
2770 unsigned int def_pdu_ring_sz;
2771 struct be_ctrl_info *ctrl = &phba->ctrl;
2772 int status;
2774 def_pdu_ring_sz =
2775 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2776 phwi_ctrlr = phba->phwi_ctrlr;
2777 phwi_context = phwi_ctrlr->phwi_ctxt;
2778 phwi_context->max_eqd = 0;
2779 phwi_context->min_eqd = 0;
2780 phwi_context->cur_eqd = 64;
2781 be_cmd_fw_initialize(&phba->ctrl);
2783 status = beiscsi_create_eqs(phba, phwi_context);
2784 if (status != 0) {
2785 shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
2786 goto error;
2789 status = be_mcc_queues_create(phba, phwi_context);
2790 if (status != 0)
2791 goto error;
2793 status = mgmt_check_supported_fw(ctrl, phba);
2794 if (status != 0) {
2795 shost_printk(KERN_ERR, phba->shost,
2796 "Unsupported fw version \n");
2797 goto error;
2800 status = beiscsi_create_cqs(phba, phwi_context);
2801 if (status != 0) {
2802 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2803 goto error;
2806 status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
2807 def_pdu_ring_sz);
2808 if (status != 0) {
2809 shost_printk(KERN_ERR, phba->shost,
2810 "Default Header not created\n");
2811 goto error;
2814 status = beiscsi_create_def_data(phba, phwi_context,
2815 phwi_ctrlr, def_pdu_ring_sz);
2816 if (status != 0) {
2817 shost_printk(KERN_ERR, phba->shost,
2818 "Default Data not created\n");
2819 goto error;
2822 status = beiscsi_post_pages(phba);
2823 if (status != 0) {
2824 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
2825 goto error;
2828 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
2829 if (status != 0) {
2830 shost_printk(KERN_ERR, phba->shost,
2831 "WRB Rings not created\n");
2832 goto error;
2835 SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
2836 return 0;
2838 error:
2839 shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
2840 hwi_cleanup(phba);
2841 return -ENOMEM;
2844 static int hwi_init_controller(struct beiscsi_hba *phba)
2846 struct hwi_controller *phwi_ctrlr;
2848 phwi_ctrlr = phba->phwi_ctrlr;
2849 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
2850 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2851 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
2852 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
2853 phwi_ctrlr->phwi_ctxt);
2854 } else {
2855 shost_printk(KERN_ERR, phba->shost,
2856 "HWI_MEM_ADDN_CONTEXT is more than one element."
2857 "Failing to load\n");
2858 return -ENOMEM;
2861 iscsi_init_global_templates(phba);
2862 beiscsi_init_wrb_handle(phba);
2863 hwi_init_async_pdu_ctx(phba);
2864 if (hwi_init_port(phba) != 0) {
2865 shost_printk(KERN_ERR, phba->shost,
2866 "hwi_init_controller failed\n");
2867 return -ENOMEM;
2869 return 0;
2872 static void beiscsi_free_mem(struct beiscsi_hba *phba)
2874 struct be_mem_descriptor *mem_descr;
2875 int i, j;
2877 mem_descr = phba->init_mem;
2878 i = 0;
2879 j = 0;
2880 for (i = 0; i < SE_MEM_MAX; i++) {
2881 for (j = mem_descr->num_elements; j > 0; j--) {
2882 pci_free_consistent(phba->pcidev,
2883 mem_descr->mem_array[j - 1].size,
2884 mem_descr->mem_array[j - 1].virtual_address,
2885 mem_descr->mem_array[j - 1].bus_address.
2886 u.a64.address);
2888 kfree(mem_descr->mem_array);
2889 mem_descr++;
2891 kfree(phba->init_mem);
2892 kfree(phba->phwi_ctrlr);
2895 static int beiscsi_init_controller(struct beiscsi_hba *phba)
2897 int ret = -ENOMEM;
2899 ret = beiscsi_get_memory(phba);
2900 if (ret < 0) {
2901 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
2902 "Failed in beiscsi_get_memory\n");
2903 return ret;
2906 ret = hwi_init_controller(phba);
2907 if (ret)
2908 goto free_init;
2909 SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller");
2910 return 0;
2912 free_init:
2913 beiscsi_free_mem(phba);
2914 return -ENOMEM;
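/*
 * beiscsi_init_sgl_handle - partition the sgl_handle pool: the first
 * ios_per_ctrl handles serve normal I/O, while the remainder (the
 * "eh" pool) appear reserved for management-type tasks. Each handle
 * is then pointed at its iscsi_sge fragment array and given a
 * firmware-relative sgl_index.
 */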
2917 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
2919 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
2920 struct sgl_handle *psgl_handle;
2921 struct iscsi_sge *pfrag;
2922 unsigned int arr_index, i, idx;
2924 phba->io_sgl_hndl_avbl = 0;
2925 phba->eh_sgl_hndl_avbl = 0;
2927 mem_descr_sglh = phba->init_mem;
2928 mem_descr_sglh += HWI_MEM_SGLH;
2929 if (1 == mem_descr_sglh->num_elements) {
2930 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2931 phba->params.ios_per_ctrl,
2932 GFP_KERNEL);
2933 if (!phba->io_sgl_hndl_base) {
2934 shost_printk(KERN_ERR, phba->shost,
2935 "Mem Alloc Failed. Failing to load\n");
2936 return -ENOMEM;
2938 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2939 (phba->params.icds_per_ctrl -
2940 phba->params.ios_per_ctrl),
2941 GFP_KERNEL);
2942 if (!phba->eh_sgl_hndl_base) {
2943 kfree(phba->io_sgl_hndl_base);
2944 shost_printk(KERN_ERR, phba->shost,
2945 "Mem Alloc Failed. Failing to load\n");
2946 return -ENOMEM;
2948 } else {
2949 shost_printk(KERN_ERR, phba->shost,
2950 "HWI_MEM_SGLH is more than one element."
2951 "Failing to load\n");
2952 return -ENOMEM;
2955 arr_index = 0;
2956 idx = 0;
2957 while (idx < mem_descr_sglh->num_elements) {
2958 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
2960 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
2961 sizeof(struct sgl_handle)); i++) {
2962 if (arr_index < phba->params.ios_per_ctrl) {
2963 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
2964 phba->io_sgl_hndl_avbl++;
2965 arr_index++;
2966 } else {
2967 phba->eh_sgl_hndl_base[arr_index -
2968 phba->params.ios_per_ctrl] =
2969 psgl_handle;
2970 arr_index++;
2971 phba->eh_sgl_hndl_avbl++;
2973 psgl_handle++;
2975 idx++;
2977 SE_DEBUG(DBG_LVL_8,
2978 "phba->io_sgl_hndl_avbl=%d"
2979 "phba->eh_sgl_hndl_avbl=%d \n",
2980 phba->io_sgl_hndl_avbl,
2981 phba->eh_sgl_hndl_avbl);
2982 mem_descr_sg = phba->init_mem;
2983 mem_descr_sg += HWI_MEM_SGE;
2984 SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
2985 mem_descr_sg->num_elements);
2986 arr_index = 0;
2987 idx = 0;
2988 while (idx < mem_descr_sg->num_elements) {
2989 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
2991 for (i = 0;
2992 i < (mem_descr_sg->mem_array[idx].size) /
2993 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
2994 i++) {
2995 if (arr_index < phba->params.ios_per_ctrl)
2996 psgl_handle = phba->io_sgl_hndl_base[arr_index];
2997 else
2998 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
2999 phba->params.ios_per_ctrl];
3000 psgl_handle->pfrag = pfrag;
3001 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3002 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3003 pfrag += phba->params.num_sge_per_io;
3004 psgl_handle->sgl_index =
3005 phba->fw_config.iscsi_icd_start + arr_index++;
3007 idx++;
3009 phba->io_sgl_free_index = 0;
3010 phba->io_sgl_alloc_index = 0;
3011 phba->eh_sgl_free_index = 0;
3012 phba->eh_sgl_alloc_index = 0;
3013 return 0;
3016 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3018 int i, new_cid;
3020 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3021 GFP_KERNEL);
3022 if (!phba->cid_array) {
3023 shost_printk(KERN_ERR, phba->shost,
3024 "Failed to allocate memory in "
3025 "hba_setup_cid_tbls\n");
3026 return -ENOMEM;
3028 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3029 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3030 if (!phba->ep_array) {
3031 shost_printk(KERN_ERR, phba->shost,
3032 "Failed to allocate memory in "
3033 "hba_setup_cid_tbls \n");
3034 kfree(phba->cid_array);
3035 return -ENOMEM;
3037 new_cid = phba->fw_config.iscsi_cid_start;
3038 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3039 phba->cid_array[i] = new_cid;
3040 new_cid += 2;
3042 phba->avlbl_cids = phba->params.cxns_per_ctrl;
3043 return 0;
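/*
 * hwi_enable_intr - set the host-interrupt bit in the PCI config
 * membar (if not already set) and ring each EQ doorbell once to
 * unmask event delivery.
 */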
3046 static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
3048 struct be_ctrl_info *ctrl = &phba->ctrl;
3049 struct hwi_controller *phwi_ctrlr;
3050 struct hwi_context_memory *phwi_context;
3051 struct be_queue_info *eq;
3052 u8 __iomem *addr;
3053 u32 reg, i;
3054 u32 enabled;
3056 phwi_ctrlr = phba->phwi_ctrlr;
3057 phwi_context = phwi_ctrlr->phwi_ctxt;
3059 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3060 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3061 reg = ioread32(addr);
3062 SE_DEBUG(DBG_LVL_8, "reg =x%08x \n", reg);
3064 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3065 if (!enabled) {
3066 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3067 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
3068 iowrite32(reg, addr);
3069 for (i = 0; i <= phba->num_cpus; i++) {
3070 eq = &phwi_context->be_eq[i].q;
3071 SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3072 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3074 } else
3075 shost_printk(KERN_WARNING, phba->shost,
3076 "In hwi_enable_intr, Not Enabled \n");
3077 return true;
3080 static void hwi_disable_intr(struct beiscsi_hba *phba)
3082 struct be_ctrl_info *ctrl = &phba->ctrl;
3084 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3085 u32 reg = ioread32(addr);
3087 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3088 if (enabled) {
3089 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3090 iowrite32(reg, addr);
3091 } else
3092 shost_printk(KERN_WARNING, phba->shost,
3093 "In hwi_disable_intr, Already Disabled \n");
3096 static int beiscsi_init_port(struct beiscsi_hba *phba)
3098 int ret;
3100 ret = beiscsi_init_controller(phba);
3101 if (ret < 0) {
3102 shost_printk(KERN_ERR, phba->shost,
3103 "beiscsi_dev_probe - Failed in"
3104 "beiscsi_init_controller \n");
3105 return ret;
3107 ret = beiscsi_init_sgl_handle(phba);
3108 if (ret < 0) {
3109 shost_printk(KERN_ERR, phba->shost,
3110 "beiscsi_dev_probe - Failed in"
3111 "beiscsi_init_sgl_handle \n");
3112 goto do_cleanup_ctrlr;
3115 if (hba_setup_cid_tbls(phba)) {
3116 shost_printk(KERN_ERR, phba->shost,
3117 "Failed in hba_setup_cid_tbls\n");
3118 kfree(phba->io_sgl_hndl_base);
3119 kfree(phba->eh_sgl_hndl_base);
3120 goto do_cleanup_ctrlr;
3123 return ret;
3125 do_cleanup_ctrlr:
3126 hwi_cleanup(phba);
3127 return ret;
3130 static void hwi_purge_eq(struct beiscsi_hba *phba)
3132 struct hwi_controller *phwi_ctrlr;
3133 struct hwi_context_memory *phwi_context;
3134 struct be_queue_info *eq;
3135 struct be_eq_entry *eqe = NULL;
3136 int i, eq_msix;
3137 unsigned int num_processed;
3139 phwi_ctrlr = phba->phwi_ctrlr;
3140 phwi_context = phwi_ctrlr->phwi_ctxt;
3141 if (phba->msix_enabled)
3142 eq_msix = 1;
3143 else
3144 eq_msix = 0;
3146 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3147 eq = &phwi_context->be_eq[i].q;
3148 eqe = queue_tail_node(eq);
3149 num_processed = 0;
3150 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3151 & EQE_VALID_MASK) {
3152 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3153 queue_tail_inc(eq);
3154 eqe = queue_tail_node(eq);
3155 num_processed++;
3158 if (num_processed)
3159 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
3163 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3165 unsigned char mgmt_status;
3167 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3168 if (mgmt_status)
3169 shost_printk(KERN_WARNING, phba->shost,
3170 "mgmt_epfw_cleanup FAILED \n");
3172 hwi_purge_eq(phba);
3173 hwi_cleanup(phba);
3174 kfree(phba->io_sgl_hndl_base);
3175 kfree(phba->eh_sgl_hndl_base);
3176 kfree(phba->cid_array);
3177 kfree(phba->ep_array);
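/*
 * beiscsi_offload_connection - push the negotiated login parameters
 * to the adapter with an iscsi_target_context_update WRB on the
 * connection's WRB ring, then ring the TXULP doorbell to post it.
 */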
3180 void
3181 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3182 struct beiscsi_offload_params *params)
3184 struct wrb_handle *pwrb_handle;
3185 struct iscsi_target_context_update_wrb *pwrb = NULL;
3186 struct be_mem_descriptor *mem_descr;
3187 struct beiscsi_hba *phba = beiscsi_conn->phba;
3188 u32 doorbell = 0;
/*
3191 * We can always use 0 here because it is reserved by libiscsi for
3192 * login/startup related tasks.
*/
3194 pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
3195 phba->fw_config.iscsi_cid_start));
3196 pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3197 memset(pwrb, 0, sizeof(*pwrb));
3198 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3199 max_burst_length, pwrb, params->dw[offsetof
3200 (struct amap_beiscsi_offload_params,
3201 max_burst_length) / 32]);
3202 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3203 max_send_data_segment_length, pwrb,
3204 params->dw[offsetof(struct amap_beiscsi_offload_params,
3205 max_send_data_segment_length) / 32]);
3206 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3207 first_burst_length,
3208 pwrb,
3209 params->dw[offsetof(struct amap_beiscsi_offload_params,
3210 first_burst_length) / 32]);
3212 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3213 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3214 erl) / 32] & OFFLD_PARAMS_ERL));
3215 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3216 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3217 dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3218 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3219 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3220 hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3221 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3222 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3223 ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3224 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3225 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3226 imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3227 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3228 pwrb,
3229 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3230 exp_statsn) / 32] + 1));
3231 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3232 0x7);
3233 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3234 pwrb, pwrb_handle->wrb_index);
3235 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3236 pwrb, pwrb_handle->nxt_wrb_index);
3237 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3238 session_state, pwrb, 0);
3239 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3240 pwrb, 1);
3241 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3242 pwrb, 0);
3243 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3244 0);
3246 mem_descr = phba->init_mem;
3247 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3249 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3250 pad_buffer_addr_hi, pwrb,
3251 mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3252 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3253 pad_buffer_addr_lo, pwrb,
3254 mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3256 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
3258 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3259 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3260 << DB_DEF_PDU_WRB_INDEX_SHIFT;
3261 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3263 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3266 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3267 int *index, int *age)
3269 *index = (int)itt;
3270 if (age)
3271 *age = conn->session->age;
/**
3275 * beiscsi_alloc_pdu - allocates pdu and related resources
3276 * @task: libiscsi task
3277 * @opcode: opcode of pdu for task
3279 * This is called with the session lock held. It will allocate
3280 * the wrb and sgl if needed for the command. And it will prep
3281 * the pdu's itt. beiscsi_parse_pdu will later translate
3282 * the pdu itt to the libiscsi task itt.
*/
3284 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3286 struct beiscsi_io_task *io_task = task->dd_data;
3287 struct iscsi_conn *conn = task->conn;
3288 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3289 struct beiscsi_hba *phba = beiscsi_conn->phba;
3290 struct hwi_wrb_context *pwrb_context;
3291 struct hwi_controller *phwi_ctrlr;
3292 itt_t itt;
3293 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3294 dma_addr_t paddr;
3296 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3297 GFP_KERNEL, &paddr);
3298 if (!io_task->cmd_bhs)
3299 return -ENOMEM;
3300 io_task->bhs_pa.u.a64.address = paddr;
3301 io_task->libiscsi_itt = (itt_t)task->itt;
3302 io_task->pwrb_handle = alloc_wrb_handle(phba,
3303 beiscsi_conn->beiscsi_conn_cid -
3304 phba->fw_config.iscsi_cid_start
3306 io_task->conn = beiscsi_conn;
3308 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3309 task->hdr_max = sizeof(struct be_cmd_bhs);
3311 if (task->sc) {
3312 spin_lock(&phba->io_sgl_lock);
3313 io_task->psgl_handle = alloc_io_sgl_handle(phba);
3314 spin_unlock(&phba->io_sgl_lock);
3315 if (!io_task->psgl_handle)
3316 goto free_hndls;
3317 } else {
3318 io_task->scsi_cmnd = NULL;
3319 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3320 if (!beiscsi_conn->login_in_progress) {
3321 spin_lock(&phba->mgmt_sgl_lock);
3322 io_task->psgl_handle = (struct sgl_handle *)
3323 alloc_mgmt_sgl_handle(phba);
3324 spin_unlock(&phba->mgmt_sgl_lock);
3325 if (!io_task->psgl_handle)
3326 goto free_hndls;
3328 beiscsi_conn->login_in_progress = 1;
3329 beiscsi_conn->plogin_sgl_handle =
3330 io_task->psgl_handle;
3331 } else {
3332 io_task->psgl_handle =
3333 beiscsi_conn->plogin_sgl_handle;
3335 } else {
3336 spin_lock(&phba->mgmt_sgl_lock);
3337 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3338 spin_unlock(&phba->mgmt_sgl_lock);
3339 if (!io_task->psgl_handle)
3340 goto free_hndls;
3343 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3344 wrb_index << 16) | (unsigned int)
3345 (io_task->psgl_handle->sgl_index));
3346 io_task->pwrb_handle->pio_handle = task;
3348 io_task->cmd_bhs->iscsi_hdr.itt = itt;
3349 return 0;
3351 free_hndls:
3352 phwi_ctrlr = phba->phwi_ctrlr;
3353 pwrb_context = &phwi_ctrlr->wrb_context[
3354 beiscsi_conn->beiscsi_conn_cid -
3355 phba->fw_config.iscsi_cid_start];
3356 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3357 io_task->pwrb_handle = NULL;
3358 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3359 io_task->bhs_pa.u.a64.address);
3360 SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n");
3361 return -ENOMEM;
3364 static void beiscsi_cleanup_task(struct iscsi_task *task)
3366 struct beiscsi_io_task *io_task = task->dd_data;
3367 struct iscsi_conn *conn = task->conn;
3368 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3369 struct beiscsi_hba *phba = beiscsi_conn->phba;
3370 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3371 struct hwi_wrb_context *pwrb_context;
3372 struct hwi_controller *phwi_ctrlr;
3374 phwi_ctrlr = phba->phwi_ctrlr;
3375 pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3376 - phba->fw_config.iscsi_cid_start];
3377 if (io_task->pwrb_handle) {
3378 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3379 io_task->pwrb_handle = NULL;
3382 if (io_task->cmd_bhs) {
3383 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3384 io_task->bhs_pa.u.a64.address);
3387 if (task->sc) {
3388 if (io_task->psgl_handle) {
3389 spin_lock(&phba->io_sgl_lock);
3390 free_io_sgl_handle(phba, io_task->psgl_handle);
3391 spin_unlock(&phba->io_sgl_lock);
3392 io_task->psgl_handle = NULL;
3394 } else {
3395 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
3396 return;
3397 if (io_task->psgl_handle) {
3398 spin_lock(&phba->mgmt_sgl_lock);
3399 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3400 spin_unlock(&phba->mgmt_sgl_lock);
3401 io_task->psgl_handle = NULL;
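/*
 * beiscsi_iotask - post one SCSI command. The BHS prepared in
 * beiscsi_alloc_pdu() is finished off (a Data-OUT template is set up
 * for writes), the scatterlist is written into the WRB via
 * hwi_write_sgl(), and the WRB is handed to hardware through the
 * TXULP doorbell.
 */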
3406 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3407 unsigned int num_sg, unsigned int xferlen,
3408 unsigned int writedir)
3411 struct beiscsi_io_task *io_task = task->dd_data;
3412 struct iscsi_conn *conn = task->conn;
3413 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3414 struct beiscsi_hba *phba = beiscsi_conn->phba;
3415 struct iscsi_wrb *pwrb = NULL;
3416 unsigned int doorbell = 0;
3418 pwrb = io_task->pwrb_handle->pwrb;
3419 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
3420 io_task->bhs_len = sizeof(struct be_cmd_bhs);
3422 if (writedir) {
3423 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3424 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3425 &io_task->cmd_bhs->iscsi_data_pdu,
3426 (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3427 AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3428 &io_task->cmd_bhs->iscsi_data_pdu,
3429 ISCSI_OPCODE_SCSI_DATA_OUT);
3430 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3431 &io_task->cmd_bhs->iscsi_data_pdu, 1);
3432 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3433 INI_WR_CMD);
3434 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3435 } else {
3436 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3437 INI_RD_CMD);
3438 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3440 memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3441 dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3442 io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3444 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3445 cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
3446 lun[0]));
3447 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3448 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3449 io_task->pwrb_handle->wrb_index);
3450 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3451 be32_to_cpu(task->cmdsn));
3452 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3453 io_task->psgl_handle->sgl_index);
3455 hwi_write_sgl(pwrb, sg, num_sg, io_task);
3457 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3458 io_task->pwrb_handle->nxt_wrb_index);
3459 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3461 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3462 doorbell |= (io_task->pwrb_handle->wrb_index &
3463 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3464 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3466 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3467 return 0;
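/*
 * beiscsi_mtask - post a management PDU (login, nop-out, text, TMF,
 * logout). For a TMF the victim task is looked up by its RTT and its
 * ICDs are invalidated synchronously via mgmt_invalidate_icds()
 * before the TMF WRB itself is posted.
 */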
3470 static int beiscsi_mtask(struct iscsi_task *task)
3472 struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
3473 struct iscsi_conn *conn = task->conn;
3474 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3475 struct beiscsi_hba *phba = beiscsi_conn->phba;
3476 struct iscsi_session *session;
3477 struct iscsi_wrb *pwrb = NULL;
3478 struct hwi_controller *phwi_ctrlr;
3479 struct hwi_wrb_context *pwrb_context;
3480 struct wrb_handle *pwrb_handle;
3481 unsigned int doorbell = 0;
3482 unsigned int i, cid;
3483 struct iscsi_task *aborted_task;
3484 unsigned int tag;
3486 cid = beiscsi_conn->beiscsi_conn_cid;
3487 pwrb = io_task->pwrb_handle->pwrb;
3488 memset(pwrb, 0, sizeof(*pwrb));
3489 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3490 be32_to_cpu(task->cmdsn));
3491 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3492 io_task->pwrb_handle->wrb_index);
3493 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3494 io_task->psgl_handle->sgl_index);
3496 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3497 case ISCSI_OP_LOGIN:
3498 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3499 TGT_DM_CMD);
3500 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3501 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
3502 hwi_write_buffer(pwrb, task);
3503 break;
3504 case ISCSI_OP_NOOP_OUT:
3505 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3506 INI_RD_CMD);
3507 if (task->hdr->ttt == ISCSI_RESERVED_TAG)
3508 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3509 else
3510 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
3511 hwi_write_buffer(pwrb, task);
3512 break;
3513 case ISCSI_OP_TEXT:
3514 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3515 TGT_DM_CMD);
3516 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3517 hwi_write_buffer(pwrb, task);
3518 break;
3519 case ISCSI_OP_SCSI_TMFUNC:
3520 session = conn->session;
3521 i = ((struct iscsi_tm *)task->hdr)->rtt;
3522 phwi_ctrlr = phba->phwi_ctrlr;
3523 pwrb_context = &phwi_ctrlr->wrb_context[cid -
3524 phba->fw_config.iscsi_cid_start];
3525 pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
3526 >> 16];
3527 aborted_task = pwrb_handle->pio_handle;
3528 if (!aborted_task)
3529 return 0;
3531 aborted_io_task = aborted_task->dd_data;
3532 if (!aborted_io_task->scsi_cmnd)
3533 return 0;
3535 tag = mgmt_invalidate_icds(phba,
3536 aborted_io_task->psgl_handle->sgl_index,
3537 cid);
3538 if (!tag) {
3539 shost_printk(KERN_WARNING, phba->shost,
3540 "mgmt_invalidate_icds could not be"
3541 " submitted\n");
3542 } else {
3543 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3544 phba->ctrl.mcc_numtag[tag]);
3545 free_mcc_tag(&phba->ctrl, tag);
3547 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3548 INI_TMF_CMD);
3549 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3550 hwi_write_buffer(pwrb, task);
3551 break;
3552 case ISCSI_OP_LOGOUT:
3553 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3554 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3555 HWH_TYPE_LOGOUT);
3556 hwi_write_buffer(pwrb, task);
3557 break;
3559 default:
3560 SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported \n",
3561 task->hdr->opcode & ISCSI_OPCODE_MASK);
3562 return -EINVAL;
3565 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
3566 task->data_count);
3567 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3568 io_task->pwrb_handle->nxt_wrb_index);
3569 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3571 doorbell |= cid & DB_WRB_POST_CID_MASK;
3572 doorbell |= (io_task->pwrb_handle->wrb_index &
3573 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3574 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3575 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3576 return 0;
3579 static int beiscsi_task_xmit(struct iscsi_task *task)
3581 struct iscsi_conn *conn = task->conn;
3582 struct beiscsi_io_task *io_task = task->dd_data;
3583 struct scsi_cmnd *sc = task->sc;
3584 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3585 struct scatterlist *sg;
3586 int num_sg;
3587 unsigned int writedir = 0, xferlen = 0;
3589 SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
3590 "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
3591 task, conn, beiscsi_conn);
3592 if (!sc)
3593 return beiscsi_mtask(task);
3595 io_task->scsi_cmnd = sc;
3596 num_sg = scsi_dma_map(sc);
3597 if (num_sg < 0) {
3598 SE_DEBUG(DBG_LVL_1, "scsi_dma_map Failed\n");
3599 return num_sg;
3601 SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3602 (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3603 xferlen = scsi_bufflen(sc);
3604 sg = scsi_sglist(sc);
3605 if (sc->sc_data_direction == DMA_TO_DEVICE) {
3606 writedir = 1;
3607 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n",
3608 task->imm_count);
3609 } else
3610 writedir = 0;
3611 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3614 static void beiscsi_remove(struct pci_dev *pcidev)
3616 struct beiscsi_hba *phba = NULL;
3617 struct hwi_controller *phwi_ctrlr;
3618 struct hwi_context_memory *phwi_context;
3619 struct be_eq_obj *pbe_eq;
3620 unsigned int i, msix_vec;
3622 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3623 if (!phba) {
3624 dev_err(&pcidev->dev, "beiscsi_remove called with no phba \n");
3625 return;
3628 phwi_ctrlr = phba->phwi_ctrlr;
3629 phwi_context = phwi_ctrlr->phwi_ctxt;
3630 hwi_disable_intr(phba);
3631 if (phba->msix_enabled) {
3632 for (i = 0; i <= phba->num_cpus; i++) {
3633 msix_vec = phba->msix_entries[i].vector;
3634 free_irq(msix_vec, &phwi_context->be_eq[i]);
3636 } else
3637 if (phba->pcidev->irq)
3638 free_irq(phba->pcidev->irq, phba);
3639 pci_disable_msix(phba->pcidev);
3640 destroy_workqueue(phba->wq);
3641 if (blk_iopoll_enabled)
3642 for (i = 0; i < phba->num_cpus; i++) {
3643 pbe_eq = &phwi_context->be_eq[i];
3644 blk_iopoll_disable(&pbe_eq->iopoll);
3647 beiscsi_clean_port(phba);
3648 beiscsi_free_mem(phba);
3649 beiscsi_unmap_pci_function(phba);
3650 pci_free_consistent(phba->pcidev,
3651 phba->ctrl.mbox_mem_alloced.size,
3652 phba->ctrl.mbox_mem_alloced.va,
3653 phba->ctrl.mbox_mem_alloced.dma);
3654 iscsi_host_remove(phba->shost);
3655 pci_dev_put(phba->pcidev);
3656 iscsi_host_free(phba->shost);
3657 }
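/*
 * Request num_cpus + 1 MSI-X entries: one per CPU for the I/O event
 * queues plus one extra (assumed here to serve the MCC/default event
 * queue, which would match the "i <= phba->num_cpus" loops used when
 * freeing vectors).  On failure msix_enabled stays false and the
 * driver is left to fall back to legacy INTx.
 */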
3659 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3660 {
3661 int i, status;
3663 for (i = 0; i <= phba->num_cpus; i++)
3664 phba->msix_entries[i].entry = i;
3666 status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3667 (phba->num_cpus + 1));
3668 if (!status)
3669 phba->msix_enabled = true;
3671 return;
3672 }
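/**
 * beiscsi_dev_probe - PCI probe entry point
 *
 * Bring-up order: enable the PCI device, allocate the Scsi_Host and
 * phba, size and enable MSI-X, initialise the controller and read
 * the firmware config, bring up the port, seed the MCC tag pool,
 * create the per-host workqueue, arm blk_iopoll on every CPU, then
 * request IRQs and enable hardware interrupts.  Each failure unwinds
 * through the labels at the bottom of the function.
 */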
3674 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3675 const struct pci_device_id *id)
3676 {
3677 struct beiscsi_hba *phba = NULL;
3678 struct hwi_controller *phwi_ctrlr;
3679 struct hwi_context_memory *phwi_context;
3680 struct be_eq_obj *pbe_eq;
3681 int ret, msix_vec, num_cpus, i;
3683 ret = beiscsi_enable_pci(pcidev);
3684 if (ret < 0) {
3685 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3686 "Failed to enable pci device\n");
3687 return ret;
3688 }
3690 phba = beiscsi_hba_alloc(pcidev);
3691 if (!phba) {
3692 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3693 " Failed in beiscsi_hba_alloc \n");
3694 goto disable_pci;
3695 }
3696 SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
3698 if (enable_msix)
3699 num_cpus = find_num_cpus();
3700 else
3701 num_cpus = 1;
3702 phba->num_cpus = num_cpus;
3703 SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus);
3705 if (enable_msix)
3706 beiscsi_msix_enable(phba);
3707 ret = be_ctrl_init(phba, pcidev);
3708 if (ret) {
3709 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3710 "Failed in be_ctrl_init\n");
3711 goto hba_free;
3712 }
3714 spin_lock_init(&phba->io_sgl_lock);
3715 spin_lock_init(&phba->mgmt_sgl_lock);
3716 spin_lock_init(&phba->isr_lock);
3717 ret = mgmt_get_fw_config(&phba->ctrl, phba);
3718 if (ret != 0) {
3719 shost_printk(KERN_ERR, phba->shost,
3720 "Error getting fw config\n");
3721 goto free_port;
3722 }
3723 phba->shost->max_id = phba->fw_config.iscsi_cid_count;
3724 beiscsi_get_params(phba);
3725 phba->shost->can_queue = phba->params.ios_per_ctrl;
3726 ret = beiscsi_init_port(phba);
3727 if (ret < 0) {
3728 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3729 "Failed in beiscsi_init_port\n");
3730 goto free_port;
3731 }
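/*
 * Seed the MCC tag pool with tags 1..MAX_MCC_CMD; tag 0 is reserved
 * as the "no tag" failure value (see the !tag checks above).
 * mcc_numtag[tag] doubles as the condition that MCC waiters sleep on.
 */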
3733 for (i = 0; i < MAX_MCC_CMD ; i++) {
3734 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
3735 phba->ctrl.mcc_tag[i] = i + 1;
3736 phba->ctrl.mcc_numtag[i + 1] = 0;
3737 phba->ctrl.mcc_tag_available++;
3738 }
3740 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
3742 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3743 phba->shost->host_no);
3744 phba->wq = create_workqueue(phba->wq_name);
3745 if (!phba->wq) {
3746 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3747 "Failed to allocate work queue\n");
3748 goto free_twq;
3749 }
3751 INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3753 phwi_ctrlr = phba->phwi_ctrlr;
3754 phwi_context = phwi_ctrlr->phwi_ctxt;
3755 if (blk_iopoll_enabled) {
3756 for (i = 0; i < phba->num_cpus; i++) {
3757 pbe_eq = &phwi_context->be_eq[i];
3758 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3759 be_iopoll);
3760 blk_iopoll_enable(&pbe_eq->iopoll);
3761 }
3762 }
3763 ret = beiscsi_init_irqs(phba);
3764 if (ret < 0) {
3765 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3766 "Failed to beiscsi_init_irqs\n");
3767 goto free_blkenbld;
3768 }
3769 ret = hwi_enable_intr(phba);
3770 if (ret < 0) {
3771 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3772 "Failed to hwi_enable_intr\n");
3773 goto free_ctrlr;
3774 }
3775 SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
3776 return 0;
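/*
 * Error unwind: each label below releases everything acquired before
 * the point at which the corresponding goto is taken, in reverse
 * order of setup.
 */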
3778 free_ctrlr:
3779 if (phba->msix_enabled) {
3780 for (i = 0; i <= phba->num_cpus; i++) {
3781 msix_vec = phba->msix_entries[i].vector;
3782 free_irq(msix_vec, &phwi_context->be_eq[i]);
3783 }
3784 } else
3785 if (phba->pcidev->irq)
3786 free_irq(phba->pcidev->irq, phba);
3787 pci_disable_msix(phba->pcidev);
3788 free_blkenbld:
3789 destroy_workqueue(phba->wq);
3790 if (blk_iopoll_enabled)
3791 for (i = 0; i < phba->num_cpus; i++) {
3792 pbe_eq = &phwi_context->be_eq[i];
3793 blk_iopoll_disable(&pbe_eq->iopoll);
3794 }
3795 free_twq:
3796 beiscsi_clean_port(phba);
3797 beiscsi_free_mem(phba);
3798 free_port:
3799 pci_free_consistent(phba->pcidev,
3800 phba->ctrl.mbox_mem_alloced.size,
3801 phba->ctrl.mbox_mem_alloced.va,
3802 phba->ctrl.mbox_mem_alloced.dma);
3803 beiscsi_unmap_pci_function(phba);
3804 hba_free:
3805 iscsi_host_remove(phba->shost);
3806 pci_dev_put(phba->pcidev);
3807 iscsi_host_free(phba->shost);
3808 disable_pci:
3809 pci_disable_device(pcidev);
3810 return ret;
3811 }
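/*
 * iSCSI transport template: advertises digest support, multiple R2Ts
 * and a fully offloaded data path (CAP_DATA_PATH_OFFLOAD), mixing
 * be2iscsi-specific handlers with stock libiscsi ones such as
 * iscsi_conn_teardown and iscsi_session_get_param.
 */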
3813 struct iscsi_transport beiscsi_iscsi_transport = {
3814 .owner = THIS_MODULE,
3815 .name = DRV_NAME,
3816 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
3817 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
3818 .param_mask = ISCSI_MAX_RECV_DLENGTH |
3819 ISCSI_MAX_XMIT_DLENGTH |
3820 ISCSI_HDRDGST_EN |
3821 ISCSI_DATADGST_EN |
3822 ISCSI_INITIAL_R2T_EN |
3823 ISCSI_MAX_R2T |
3824 ISCSI_IMM_DATA_EN |
3825 ISCSI_FIRST_BURST |
3826 ISCSI_MAX_BURST |
3827 ISCSI_PDU_INORDER_EN |
3828 ISCSI_DATASEQ_INORDER_EN |
3829 ISCSI_ERL |
3830 ISCSI_CONN_PORT |
3831 ISCSI_CONN_ADDRESS |
3832 ISCSI_EXP_STATSN |
3833 ISCSI_PERSISTENT_PORT |
3834 ISCSI_PERSISTENT_ADDRESS |
3835 ISCSI_TARGET_NAME | ISCSI_TPGT |
3836 ISCSI_USERNAME | ISCSI_PASSWORD |
3837 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
3838 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
3839 ISCSI_LU_RESET_TMO |
3840 ISCSI_PING_TMO | ISCSI_RECV_TMO |
3841 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
3842 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
3843 ISCSI_HOST_INITIATOR_NAME,
3844 .create_session = beiscsi_session_create,
3845 .destroy_session = beiscsi_session_destroy,
3846 .create_conn = beiscsi_conn_create,
3847 .bind_conn = beiscsi_conn_bind,
3848 .destroy_conn = iscsi_conn_teardown,
3849 .set_param = beiscsi_set_param,
3850 .get_conn_param = beiscsi_conn_get_param,
3851 .get_session_param = iscsi_session_get_param,
3852 .get_host_param = beiscsi_get_host_param,
3853 .start_conn = beiscsi_conn_start,
3854 .stop_conn = beiscsi_conn_stop,
3855 .send_pdu = iscsi_conn_send_pdu,
3856 .xmit_task = beiscsi_task_xmit,
3857 .cleanup_task = beiscsi_cleanup_task,
3858 .alloc_pdu = beiscsi_alloc_pdu,
3859 .parse_pdu_itt = beiscsi_parse_pdu,
3860 .get_stats = beiscsi_conn_get_stats,
3861 .ep_connect = beiscsi_ep_connect,
3862 .ep_poll = beiscsi_ep_poll,
3863 .ep_disconnect = beiscsi_ep_disconnect,
3864 .session_recovery_timedout = iscsi_session_recovery_timedout,
3865 };
3867 static struct pci_driver beiscsi_pci_driver = {
3868 .name = DRV_NAME,
3869 .probe = beiscsi_dev_probe,
3870 .remove = beiscsi_remove,
3871 .id_table = beiscsi_pci_id_table
3872 };
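/*
 * Module load registers the iSCSI transport before the PCI driver so
 * the transport is available by the time probe runs; module unload
 * and the error path below both reverse that order.
 */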
3875 static int __init beiscsi_module_init(void)
3876 {
3877 int ret;
3879 beiscsi_scsi_transport =
3880 iscsi_register_transport(&beiscsi_iscsi_transport);
3881 if (!beiscsi_scsi_transport) {
3882 SE_DEBUG(DBG_LVL_1,
3883 "beiscsi_module_init - Unable to register beiscsi"
3884 "transport.\n");
3885 return -ENOMEM;
3886 }
3887 SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
3888 &beiscsi_iscsi_transport);
3890 ret = pci_register_driver(&beiscsi_pci_driver);
3891 if (ret) {
3892 SE_DEBUG(DBG_LVL_1,
3893 "beiscsi_module_init - Unable to register"
3894 "beiscsi pci driver.\n");
3895 goto unregister_iscsi_transport;
3896 }
3897 return 0;
3899 unregister_iscsi_transport:
3900 iscsi_unregister_transport(&beiscsi_iscsi_transport);
3901 return ret;
3902 }
3904 static void __exit beiscsi_module_exit(void)
3905 {
3906 pci_unregister_driver(&beiscsi_pci_driver);
3907 iscsi_unregister_transport(&beiscsi_iscsi_transport);
3908 }
3910 module_init(beiscsi_module_init);
3911 module_exit(beiscsi_module_exit);