/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
							struct rsp_que *rsp);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block to examine
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
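/*
 * Worked example (illustrative, not part of the original source): for
 * dsds = 12, three descriptors fit in the Command Type 2 IOCB itself and
 * the remaining nine need ceil(9 / 7) = 2 Continuation Type 0 IOCBs, so
 * qla2x00_calc_iocbs_32(12) returns 1 + 9 / 7 + 1 = 3 entries.
 */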
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
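/*
 * Example (illustrative): with the 2 + 5 packing used here,
 * qla2x00_calc_iocbs_64(12) = 1 + (12 - 2) / 5 = 3, with no remainder
 * term since 10 divides evenly by 5.
 */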
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
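/*
 * Example (illustrative): with req->length = 128, advancing from
 * ring_index 127 makes the index equal req->length, so it wraps to 0 and
 * ring_ptr is reset to the base of the ring; otherwise ring_ptr simply
 * moves to the next entry.
 */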
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
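/*
 * Worked example (illustrative): for tot_dsds = 12, the loop above stores
 * the first three segments in the Command Type 2 IOCB, then allocates two
 * Continuation Type 0 IOCBs holding seven and two segments respectively,
 * matching the three entries predicted by qla2x00_calc_iocbs_32(12).
 */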
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
							!= QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
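/*
 * Ring-space example (illustrative): if the out pointer read from the
 * chip is cnt = 10 and req->ring_index = 100 with req->length = 128, then
 * req->cnt = 128 - (100 - 10) = 38 free entries; the "+ 2" slack in the
 * comparison keeps the in pointer from catching up to the out pointer.
 */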
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
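/*
 * Usage note (illustrative): __qla2x00_marker() assumes the caller already
 * holds hardware_lock, while qla2x00_marker() is the self-locking wrapper
 * used from the start_scsi routines above.
 */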
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	request_t *pkt = NULL;
	uint16_t cnt;
	uint32_t *dword_ptr;
	uint32_t timer;
	uint16_t req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries. */
			if (ha->mqenable)
				cnt = (uint16_t)
					RD_REG_DWORD(&reg->isp25mq.req_q_out);
			else {
				if (IS_FWI2_CAPABLE(ha))
					cnt = (uint16_t)RD_REG_DWORD(
						&reg->isp24.req_q_out);
				else
					cnt = qla2x00_debounce_register(
						ISP_REQ_Q_OUT(ha, &reg->isp));
			}
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
					(req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);	/* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!vha->marker_needed && !vha->flags.init_done)
			qla2x00_poll(rsp);
		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}
/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (ha->mqenable) {
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
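/*
 * Example (illustrative): one DSD lives in the Command Type 7 IOCB and
 * five in each continuation, so qla24xx_calc_iocbs(11) = 1 + (11 - 1) / 5
 * = 3 entries, with no remainder term.
 */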
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
							!= QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
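/*
 * Note (illustrative, based on the MAKE_HANDLE() macro in the driver
 * headers): the request queue id is packed into the upper half of the
 * 32-bit handle (e.g. queue 1, slot 5 -> 0x00010005), which is how the
 * 24xx completion path recovers the originating queue.
 */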
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
		affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}
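/*
 * Example (illustrative): with cpu_affinity_enabled and max_rsp_queues = 4,
 * a command issued on CPU 2 maps to rsp_q_map[3]; a CPU number at or above
 * max_rsp_queues - 1, or a negative cpu, falls back to the base queue
 * rsp_q_map[0].
 */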
/* Generic Control-SRB manipulation functions. */

static void *
qla2x00_alloc_iocbs(srb_t *sp)
{
	scsi_qla_host_t	*vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Check for room on request queue. */
	if (req->cnt < req_cnt) {
		if (ha->mqenable)
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt)
		goto queuing_error;

	/* Prep packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	req->cnt -= req_cnt;

	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	pkt->entry_count = req_cnt;
	pkt->handle = handle;
	sp->handle = handle;

queuing_error:
	return pkt;
}
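/*
 * Note (illustrative): the handle search above is a circular scan of
 * outstanding_cmds[] starting just after current_outstanding_cmd; it
 * visits at most the whole table once and, because the wrap resets the
 * handle to 1, slot 0 is never handed out by this loop.
 */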
static void
qla2x00_start_iocbs(srb_t *sp)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (ha->mqenable) {
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else if (IS_FWI2_CAPABLE(ha)) {
		WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
	} else {
		WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), req->ring_index);
		RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
	}
}
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_logio *lio = sp->ctx;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_logio *lio = sp->ctx;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id):
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct fc_bsg_job *bsg_job = ((struct srb_bsg *)sp->ctx)->bsg_job;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->fcport->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    (((struct srb_bsg *)sp->ctx)->ctx.type == SRB_ELS_CMD_RPT) ?
	    bsg_job->request->rqst_data.r_els.els_code :
	    bsg_job->request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));
}
static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct fc_bsg_job *bsg_job = ((struct srb_bsg *)sp->ctx)->bsg_job;
	int loop_iteration = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->fcport->vp_idx;
	ct_iocb->comp_status = __constant_cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	struct srb_ctx *ctx = sp->ctx;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp);
	if (!pkt)
		goto done;

	rval = QLA_SUCCESS;
	switch (ctx->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		qla24xx_ct_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(sp);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}