/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2010 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
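/*
 * Worked example (illustrative, not part of the original driver): a request
 * with dsds = 17 data segment descriptors needs the Command Type 2 IOCB
 * (3 DSDs) plus (17 - 3) / 7 = 2 full Continuation Type 0 IOCBs; the
 * remainder (17 - 3) % 7 = 0 adds nothing, so qla2x00_calc_iocbs_32(17)
 * returns 3 request-queue entries.
 */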
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
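/*
 * Worked example (illustrative): with dsds = 12, the Command Type 3 IOCB
 * holds 2 DSDs, (12 - 2) / 5 = 2 Continuation Type 1 IOCBs hold the rest,
 * and (12 - 2) % 5 = 0 adds nothing, so qla2x00_calc_iocbs_64(12) returns 3.
 */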
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
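/*
 * Example of the ring-index wrap handled above (illustrative): with
 * req->length == 512 and req->ring_index == 511, the increment makes
 * ring_index equal to length, so the index is reset to 0 and ring_ptr
 * rewinds to the base of the request ring; otherwise ring_ptr simply
 * advances by one entry.
 */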
static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);

	/* We only support T10 DIF right now */
	if (guard != SHOST_DIX_GUARD_CRC) {
		DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard));
		return 0;
	}

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(sp->cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	case SCSI_PROT_WRITE_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(sp->cmd);
}
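/*
 * Illustrative mapping note (not from the original source): a WRITE with
 * SCSI_PROT_WRITE_STRIP carries data plus DIF from the kernel and the HBA
 * must remove the DIF before sending to the target, hence PO_MODE_DIF_REMOVE;
 * a READ with SCSI_PROT_READ_INSERT returns plain data from the target and
 * the HBA must generate and insert DIF before handing it to the host, hence
 * PO_MODE_DIF_INSERT; the *_PASS operations carry DIF end to end, hence
 * PO_MODE_DIF_PASS.
 */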
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
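/*
 * Illustrative layout note (not from the original source): each 64-bit DSD
 * written above is three little-endian 32-bit words -- address bits 31:0,
 * address bits 63:32, then the segment length in bytes -- so a 4096-byte
 * segment at DMA address 0x0000001234567000 is emitted as 0x34567000,
 * 0x00000012, 0x00001000.
 */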
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t        index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (IS_QLA82XX(ha)) {
		uint32_t dbval = 0x04 | (ha->portnum << 5);

		/* write, read and verify logic */
		dbval = dbval | (req->id << 8) | (req->ring_index << 16);
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
		WRT_REG_DWORD(
		    (unsigned long __iomem *)ha->nxdb_wr_ptr,
		    dbval);
		wmb();
		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD((unsigned long __iomem *)
			    ha->nxdb_wr_ptr, dbval);
			wmb();
		}
	} else if (ha->mqenable) {
		/* Set chip new ring index. */
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
	    __func__, iocbs));
	return iocbs;
}
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};
/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct sd_dif_tuple *spt;
	unsigned char op = scsi_get_prot_op(cmd);

	switch (scsi_get_prot_type(cmd)) {
	/* For TYPE 0 protection: no checking */
	case SCSI_PROT_DIF_TYPE0:
		pkt->ref_tag_mask[0] = 0x00;
		pkt->ref_tag_mask[1] = 0x00;
		pkt->ref_tag_mask[2] = 0x00;
		pkt->ref_tag_mask[3] = 0x00;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		if (!ql2xenablehba_err_chk)
			break;

		if (scsi_prot_sg_count(cmd)) {
			spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
			    scsi_prot_sglist(cmd)[0].offset;
			pkt->app_tag = swab32(spt->app_tag);
			pkt->app_tag_mask[0] = 0xff;
			pkt->app_tag_mask[1] = 0xff;
		}

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
		    pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
		    0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		if (!ql2xenablehba_err_chk)
			break;

		if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
		    op == SCSI_PROT_WRITE_PASS)) {
			spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
			    scsi_prot_sglist(cmd)[0].offset;
			DEBUG18(printk(KERN_DEBUG
			    "%s(): LBA from user %p, lba = 0x%x\n",
			    __func__, spt, (int)spt->ref_tag));
			pkt->ref_tag = swab32(spt->ref_tag);
			pkt->app_tag_mask[0] = 0x0;
			pkt->app_tag_mask[1] = 0x0;
		} else {
			pkt->ref_tag = cpu_to_le32((uint32_t)
			    (0xffffffff & scsi_get_lba(cmd)));
			pkt->app_tag = __constant_cpu_to_le16(0);
			pkt->app_tag_mask[0] = 0x0;
			pkt->app_tag_mask[1] = 0x0;
		}
		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}

	DEBUG18(printk(KERN_DEBUG
	    "%s(): Setting protection Tags: (BIG) ref tag = 0x%x,"
	    " app tag = 0x%x, prot SG count %d , cmd lba 0x%x,"
	    " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt,
	    (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd)));
}
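/*
 * Illustrative Type 1 example (not from the original source): for a
 * WRITE_PASS with host-supplied protection data, the ref tag is taken from
 * the first sd_dif_tuple handed down by the block layer and byte-swapped,
 * the app tag is left unchecked (mask bytes 0x00), and all four ref-tag
 * mask bytes are set to 0xff so the firmware compares the full 32-bit
 * reference tag on every block it transfers.
 */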
static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	uint32_t *cur_dsd = dsd;
	int	i;
	uint16_t	used_dsds = tot_dsds;
	uint8_t		*cp;

	scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x,"
		    " len =%d\n", __func__, cur_dsd, i, LSD(sle_dma),
		    MSD(sle_dma), sg_dma_len(sg)));
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			DEBUG18(printk("%s(): User Data buffer= %p:\n",
			    __func__, cp));
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	int	i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;
	uint8_t		*cp;

	cmd = sp->cmd;
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			DEBUG18(printk(KERN_DEBUG
			    "%s(): %p, sg entry %d - addr =0x%x"
			    "0x%x, len =%d\n", __func__, cur_dsd, i,
			    LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)));
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			DEBUG18(printk("%s(): Protection Data buffer = %p:\n",
			    __func__, cp));
		}
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t		*cur_dsd, *fcp_dl;
	scsi_qla_host_t		*vha;
	struct scsi_cmnd	*cmd;
	struct scatterlist	*cur_seg;
	uint32_t		total_bytes;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint16_t		blk_size;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	uint8_t			additional_fcpcdb_len;
	uint16_t		fcp_cmnd_len;
	struct fcp_cmnd		*fcp_cmnd;
	dma_addr_t		crc_ctx_dma;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
		    __func__, data_bytes));
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	DEBUG18(printk(KERN_DEBUG
	    "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
	    vha->host_no, sp, scsi_get_prot_op(sp->cmd)));

	cmd_pkt->vp_index = sp->fcport->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	tot_prot_dsds = scsi_prot_sg_count(cmd);
	if (!tot_prot_dsds)
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
	    GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
		    __func__));
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
	host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_attribute = 0;
	fcp_cmnd->task_management = 0;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
	    "entries %d, data bytes %d, Protection entries %d\n",
	    __func__, vha->host_no, tot_dsds, (tot_dsds-tot_prot_dsds),
	    data_bytes, tot_prot_dsds));

	/* Compute dif len and adjust data len to include protection */
	total_bytes = data_bytes;
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		dif_bytes = (data_bytes / blk_size) * 8;
		total_bytes += dif_bytes;
	}

	if (!ql2xenablehba_err_chk)
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
	    " = 0x%x (%d), dat block size =0x%x (%d)\n", __func__,
	    vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
	    crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
		    __func__, data_bytes));
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	/* Walks data segments */
	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
	if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds)))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cur_seg = scsi_prot_sglist(cmd);
		cmd_pkt->control_flags |=
		    __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	DEBUG18(qla_printk(KERN_INFO, ha,
	    "CMD sent FAILED crc_q error:sp = %p\n", sp));
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
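/*
 * Worked example of the byte-count adjustment above (illustrative): a 64 KB
 * write to a 512-byte-sector device with protection enabled covers
 * 65536 / 512 = 128 blocks, so dif_bytes = 128 * 8 = 1024 and the fibre
 * channel byte count (and fcp_dl) becomes 65536 + 1024 = 66560, while
 * crc_ctx_pkt->byte_count keeps the unprotected 65536.
 */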
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	uint32_t	*clr_ptr;
	uint32_t        index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = sp->cmd;
	struct scsi_qla_host	*vha = sp->fcport->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;
	} else
		nseg = 0;

	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}

	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = __constant_cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	DEBUG18(qla_printk(KERN_INFO, ha,
	    "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
	return QLA_FUNCTION_FAILED;
}
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
	    affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}
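/*
 * Illustrative example (not from the original source): with CPU affinity
 * enabled, max_rsp_queues = 4 and a command issued on CPU 2, affinity = 2
 * is below max_rsp_queues - 1 = 3, so the command completes on
 * rsp_q_map[3]; any CPU outside that range falls back to the default
 * response queue rsp_q_map[0].
 */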
/* Generic Control-SRB manipulation functions. */
void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (!sp)
		goto skip_cmd_array;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

skip_cmd_array:
	/* Check for room on request queue. */
	if (req->cnt < req_cnt) {
		if (ha->mqenable)
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_QLA82XX(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt)
		goto queuing_error;

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	pkt->entry_count = req_cnt;
	pkt->handle = handle;

queuing_error:
	return pkt;
}
static void
qla2x00_start_iocbs(srb_t *sp)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	if (IS_QLA82XX(ha)) {
		qla82xx_start_iocbs(sp);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable) {
			WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
			RD_REG_DWORD(&ioreg->hccr);
		} else if (IS_QLA82XX(ha)) {
			qla82xx_start_iocbs(sp);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_ctx *ctx = sp->ctx;
	struct srb_iocb *lio = ctx->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_ctx *ctx = sp->ctx;
	struct srb_iocb *lio = ctx->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id):
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}
static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
	uint32_t flags;
	unsigned int lun;
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_ctx *ctx = sp->ctx;
	struct srb_iocb *iocb = ctx->u.iocb_cmd;
	struct req_que *req = vha->req;

	flags = iocb->u.tmf.flags;
	lun = iocb->u.tmf.lun;

	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->entry_count = 1;
	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->control_flags = cpu_to_le32(flags);
	tsk->port_id[0] = fcport->d_id.b.al_pa;
	tsk->port_id[1] = fcport->d_id.b.area;
	tsk->port_id[2] = fcport->d_id.b.domain;
	tsk->vp_index = fcport->vp_idx;

	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,
		    sizeof(tsk->lun));
	}
}
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->fcport->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
	    bsg_job->request->rqst_data.r_els.els_code :
	    bsg_job->request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));
}
static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
	uint16_t        avail_dsds;
	uint32_t        *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
	int loop_iterartion = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->handle1 = sp->handle;
	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
	ct_iocb->status = __constant_cpu_to_le16(0);
	ct_iocb->control_flags = __constant_cpu_to_le16(0);
	ct_iocb->timeout = 0;
	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->total_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
	ct_iocb->req_bytecount =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->rsp_bytecount =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);

	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t       sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iterartion++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}
static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t        avail_dsds;
	uint32_t        *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
	int loop_iterartion = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->fcport->vp_idx;
	ct_iocb->comp_status = __constant_cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t       sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iterartion++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	struct srb_ctx *ctx = sp->ctx;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
	if (!pkt)
		goto done;

	rval = QLA_SUCCESS;
	switch (ctx->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		qla24xx_tm_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(sp);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}