/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
	struct rsp_que *rsp);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB holding the SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
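
/*
 * Illustrative sketch, not part of the driver: both helpers above follow
 * the same pattern, a fixed number of DSDs fit in the command IOCB itself
 * and each continuation IOCB holds a fixed number more.  A generalized
 * form, with a worked example:
 */
static inline uint16_t
qla_calc_iocbs_sketch(uint16_t dsds, uint16_t cmd_dsds, uint16_t cont_dsds)
{
	uint16_t iocbs = 1;		/* the command IOCB itself */

	if (dsds > cmd_dsds) {
		iocbs += (dsds - cmd_dsds) / cont_dsds;
		if ((dsds - cmd_dsds) % cont_dsds)
			iocbs++;	/* partially filled last one */
	}
	return iocbs;
}
/*
 * e.g. qla_calc_iocbs_sketch(10, 3, 7) == 2: three DSDs ride in the
 * Command Type 2 IOCB and the remaining seven exactly fill one
 * Continuation Type 0 IOCB.  The 64-bit case is (10, 2, 5) == 3, and
 * qla24xx_calc_iocbs() later in this file is the (dsds, 1, 5) instance.
 */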

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
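
/*
 * Illustrative sketch, not part of the driver: the "adjust ring index"
 * stanza in both helpers above is this file's standard circular advance.
 * Index and pointer move in lockstep, so req->ring_ptr always aliases
 * &req->ring[req->ring_index]:
 */
static inline request_t *
qla_ring_advance_sketch(struct req_que *req)
{
	if (++req->ring_index == req->length) {
		req->ring_index = 0;	/* wrap to the start of the ring */
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}
	return req->ring_ptr;		/* the freshly exposed entry */
}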

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
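
/*
 * Illustrative sketch, not part of the driver: each 64-bit DSD written by
 * the loop above is three little-endian 32-bit words, { address low,
 * address high, length }.  LSD()/MSD() take the low and high halves of
 * the dma_addr_t; spelled out:
 */
static inline void
qla_put_dsd64_sketch(uint32_t **dsd, dma_addr_t sle_dma, uint32_t len)
{
	*(*dsd)++ = cpu_to_le32((uint32_t)((uint64_t)sle_dma));	      /* LSD */
	*(*dsd)++ = cpu_to_le32((uint32_t)((uint64_t)sle_dma >> 32)); /* MSD */
	*(*dsd)++ = cpu_to_le32(len);
}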

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
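
/*
 * Illustrative sketch, not part of the driver: the re-check of req->cnt
 * above recomputes the free space on the circular request ring from the
 * producer index (req->ring_index) and the consumer index just read back
 * from the chip.  The "+ 2" headroom keeps the producer from completely
 * filling the ring and catching the consumer:
 */
static inline uint16_t
qla_ring_free_sketch(uint16_t producer, uint16_t consumer, uint16_t length)
{
	if (producer < consumer)
		return consumer - producer;
	return length - (producer - consumer);	/* wrapped case */
}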

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	request_t *pkt = NULL;
	uint16_t cnt;
	uint32_t *dword_ptr;
	uint32_t timer;
	uint16_t req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries. */
			if (ha->mqenable)
				cnt = (uint16_t)
				    RD_REG_DWORD(&reg->isp25mq.req_q_out);
			else {
				if (IS_FWI2_CAPABLE(ha))
					cnt = (uint16_t)RD_REG_DWORD(
					    &reg->isp24.req_q_out);
				else
					cnt = qla2x00_debounce_register(
					    ISP_REQ_Q_OUT(ha, &reg->isp));
			}
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);	/* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!vha->marker_needed && !vha->flags.init_done)
			qla2x00_poll(rsp);
		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}
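
/*
 * Illustrative skeleton, not part of the driver: the wait loop above uses
 * a common "poll with the lock dropped" shape.  Each unsuccessful pass
 * releases the hardware lock so completions can be reaped (here via
 * qla2x00_poll()), delays briefly, and re-takes the lock before the next
 * check.  The names below are hypothetical:
 */
#if 0
	while (budget--) {
		if (resource_available())
			break;
		spin_unlock_irq(&lock);
		udelay(2);
		reap_completions();
		spin_lock_irq(&lock);
	}
#endif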

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (ha->mqenable) {
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
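
/*
 * Illustrative sketch, not part of the driver: unlike the 2x00 path,
 * qla24xx_start_scsi() issues the IOCB with MAKE_HANDLE(req->id, handle),
 * so the completion path can recover which request queue the command was
 * issued on.  Assuming the usual packing of the queue id into the upper
 * half of the 32-bit handle, the composition looks like:
 */
static inline uint32_t
qla_make_handle_sketch(uint16_t que_id, uint16_t handle)
{
	return ((uint32_t)que_id << 16) | handle;
}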

static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
	    affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}
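
/*
 * Worked example, not from the source: with cpu_affinity_enabled set and
 * max_rsp_queues == 5, a command submitted on CPU 2 completes on
 * rsp_q_map[3] (queue 1 serves CPU 0, queue 2 serves CPU 1, and so on);
 * any affinity outside [0, max_rsp_queues - 1) falls back to the default
 * response queue, rsp_q_map[0].
 */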

/* Generic Control-SRB manipulation functions. */

static void *
qla2x00_alloc_iocbs(srb_t *sp)
{
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Check for room on request queue. */
	if (req->cnt < req_cnt) {
		if (ha->mqenable)
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt)
		goto queuing_error;

	/* Prep packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	req->cnt -= req_cnt;

	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	pkt->entry_count = req_cnt;
	pkt->handle = handle;
	sp->handle = handle;

queuing_error:
	return pkt;
}
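
/*
 * Illustrative sketch, not part of the driver: the handle scan above (and
 * the identical loops in the two start_scsi paths) is a circular search
 * over slots 1..MAX_OUTSTANDING_COMMANDS-1, resuming where the previous
 * allocation left off; slot 0 is skipped so that 0 can mean "no handle":
 */
static inline uint32_t
qla_find_free_handle_sketch(srb_t **cmds, uint32_t start, uint32_t max)
{
	uint32_t index, handle = start;

	for (index = 1; index < max; index++) {
		if (++handle == max)
			handle = 1;	/* wrap, skipping slot 0 */
		if (!cmds[handle])
			return handle;
	}
	return 0;			/* all handles in use */
}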

static void
qla2x00_start_iocbs(srb_t *sp)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (ha->mqenable) {
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else if (IS_FWI2_CAPABLE(ha)) {
		WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
	} else {
		WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), req->ring_index);
		RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
	}
}

static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_logio *lio = sp->ctx;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}

static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_logio *lio = sp->ctx;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}

static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}

static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id) :
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}

static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct fc_bsg_job *bsg_job = ((struct srb_bsg *)sp->ctx)->bsg_job;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->fcport->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    (((struct srb_bsg *)sp->ctx)->ctx.type == SRB_ELS_CMD_RPT) ?
	    bsg_job->request->rqst_data.r_els.els_code :
	    bsg_job->request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));
}

static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct fc_bsg_job *bsg_job = ((struct srb_bsg *)sp->ctx)->bsg_job;
	int loop_iteration = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->fcport->vp_idx;
	ct_iocb->comp_status = __constant_cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}

int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	struct srb_ctx *ctx = sp->ctx;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp);
	if (!pkt)
		goto done;

	rval = QLA_SUCCESS;
	switch (ctx->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		qla24xx_ct_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(sp);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}