/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2005 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_hba * phba)
{
        struct lpfc_scsi_buf *psb;
        struct ulp_bde64 *bpl;
        IOCB_t *iocb;
        dma_addr_t pdma_phys;
        uint16_t iotag;

        psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
        if (!psb)
                return NULL;
        memset(psb, 0, sizeof (struct lpfc_scsi_buf));
        psb->scsi_hba = phba;

        /*
         * Get memory from the pci pool to map the virt space to pci bus space
         * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
         * struct fcp_rsp and the number of bde's necessary to support the
         * sg_tablesize.
         */
        psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
                                   &psb->dma_handle);
        if (!psb->data) {
                kfree(psb);
                return NULL;
        }

        /* Initialize virtual ptrs to dma_buf region. */
        memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

        /* Allocate iotag for psb->cur_iocbq. */
        iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
        if (iotag == 0) {
                pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
                              psb->data, psb->dma_handle);
                kfree(psb);
                return NULL;
        }
        psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

        psb->fcp_cmnd = psb->data;
        psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
        psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
                       sizeof(struct fcp_rsp);

        /* Initialize local short-hand pointers. */
        bpl = psb->fcp_bpl;
        pdma_phys = psb->dma_handle;
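
        /*
         * At this point psb->data has been carved up as follows (total size
         * cfg_sg_dma_buf_size, presumably chosen when the DMA pool was
         * created):
         *
         *   [ struct fcp_cmnd ][ struct fcp_rsp ][ BPL entries ... ]
         *    ^psb->fcp_cmnd     ^psb->fcp_rsp     ^psb->fcp_bpl
         */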

        /*
         * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
         * list bdes.  Initialize the first two and leave the rest for
         * queuecommand.
         */
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
        bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
        bpl->tus.f.bdeFlags = BUFF_USE_CMND;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);
        bpl++;

        /* Setup the physical region for the FCP RSP */
        pdma_phys += sizeof (struct fcp_cmnd);
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
        bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
        bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
        bpl->tus.w = le32_to_cpu(bpl->tus.w);

        /*
         * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
         * initialize it with all known data now.
         */
        pdma_phys += (sizeof (struct fcp_rsp));
        iocb = &psb->cur_iocbq.iocb;
        iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
        iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
        iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
        iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
        iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
        iocb->ulpBdeCount = 1;
        iocb->ulpClass = CLASS3;

        return psb;
}

struct lpfc_scsi_buf*
lpfc_sli_get_scsi_buf(struct lpfc_hba * phba)
{
        struct lpfc_scsi_buf * lpfc_cmd = NULL;
        struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;

        list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
        return lpfc_cmd;
}
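
/*
 * Note: list_remove_head() leaves lpfc_cmd NULL when the free list is
 * empty, so callers must check the return value.  The free list itself is
 * protected by the host_lock; callers of this routine appear to rely on
 * already holding that lock.
 */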

static void
lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
        /*
         * There are only two special cases to consider.  (1) the scsi command
         * requested scatter-gather usage or (2) the scsi command allocated
         * a request buffer, but did not request use_sg.  There is a third
         * case, but it does not require resource deallocation.
         */
        if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
                dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
                             psb->seg_cnt, psb->pCmd->sc_data_direction);
        } else {
                if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
                        dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
                                         psb->pCmd->request_bufflen,
                                         psb->pCmd->sc_data_direction);
                }
        }

        list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
}

static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
{
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct scatterlist *sgel = NULL;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        dma_addr_t physaddr;
        uint32_t i, num_bde = 0;
        int datadir = scsi_cmnd->sc_data_direction;
        int dma_error;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.  Start the lpfc command prep by
         * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
         * data bde entry.
         */
        bpl += 2;
        if (scsi_cmnd->use_sg) {
                /*
                 * The driver stores the segment count returned from pci_map_sg
                 * because this is a count of dma-mappings used to map the
                 * use_sg pages.  They are not guaranteed to be the same for
                 * those architectures that implement an IOMMU.
                 */
                sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
                lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
                                               scsi_cmnd->use_sg, datadir);
                if (lpfc_cmd->seg_cnt == 0)
                        return 1;

                if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
                        printk(KERN_ERR "%s: Too many sg segments from "
                               "dma_map_sg.  Config %d, seg_cnt %d",
                               __FUNCTION__, phba->cfg_sg_seg_cnt,
                               lpfc_cmd->seg_cnt);
                        dma_unmap_sg(&phba->pcidev->dev, sgel,
                                     lpfc_cmd->seg_cnt, datadir);
                        return 1;
                }

                /*
                 * The driver established a maximum scatter-gather segment
                 * count during probe that limits the number of sg elements
                 * in any single scsi command.  Just run through the seg_cnt
                 * and format the bde's.
                 */
                for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
                        physaddr = sg_dma_address(sgel);
                        bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
                        bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
                        bpl->tus.f.bdeSize = sg_dma_len(sgel);
                        if (datadir == DMA_TO_DEVICE)
                                bpl->tus.f.bdeFlags = 0;
                        else
                                bpl->tus.f.bdeFlags = BUFF_USE_RCV;
                        bpl->tus.w = le32_to_cpu(bpl->tus.w);
                        bpl++;
                        sgel++;
                        num_bde++;
                }
        } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
                physaddr = dma_map_single(&phba->pcidev->dev,
                                          scsi_cmnd->request_buffer,
                                          scsi_cmnd->request_bufflen,
                                          datadir);
                dma_error = dma_mapping_error(physaddr);
                if (dma_error) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "%d:0718 Unable to dma_map_single "
                                        "request_buffer: x%x\n",
                                        phba->brd_no, dma_error);
                        return 1;
                }

                lpfc_cmd->nonsg_phys = physaddr;
                bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
                bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
                bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
                if (datadir == DMA_TO_DEVICE)
                        bpl->tus.f.bdeFlags = 0;
                else
                        bpl->tus.f.bdeFlags = BUFF_USE_RCV;
                bpl->tus.w = le32_to_cpu(bpl->tus.w);
                num_bde = 1;
        }

        /*
         * Finish initializing those IOCB fields that are dependent on the
         * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
         * reinitialized since all iocb memory resources are used many times
         * for transmit, receive, and continuation bpl's.
         */
        iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
        iocb_cmd->un.fcpi64.bdl.bdeSize +=
                (num_bde * sizeof (struct ulp_bde64));
        iocb_cmd->ulpBdeCount = 1;
        iocb_cmd->ulpLe = 1;
        fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
        return 0;
}
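
/*
 * A nonzero return from lpfc_scsi_prep_dma_buf() means the DMA mapping
 * failed; lpfc_queuecommand (below) turns that into SCSI_MLQUEUE_HOST_BUSY
 * so the midlayer will retry the command later.
 */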

static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
{
        struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
        struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
        struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
        uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
        uint32_t resp_info = fcprsp->rspStatus2;
        uint32_t scsi_status = fcprsp->rspStatus3;
        uint32_t host_status = DID_OK;
        uint32_t rsplen = 0;

        /*
         *  If this is a task management command, there is no
         *  scsi packet associated with this lpfc_cmd.  The driver
         *  consumes it.
         */
        if (fcpcmd->fcpCntl2) {
                scsi_status = 0;
                goto out;
        }

        lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                        "%d:0730 FCP command failed: RSP "
                        "Data: x%x x%x x%x x%x x%x x%x\n",
                        phba->brd_no, resp_info, scsi_status,
                        be32_to_cpu(fcprsp->rspResId),
                        be32_to_cpu(fcprsp->rspSnsLen),
                        be32_to_cpu(fcprsp->rspRspLen),
                        fcprsp->rspInfo3);

        if (resp_info & RSP_LEN_VALID) {
                rsplen = be32_to_cpu(fcprsp->rspRspLen);
                if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
                    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
                        host_status = DID_ERROR;
                        goto out;
                }
        }

        if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
                uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
                if (snslen > SCSI_SENSE_BUFFERSIZE)
                        snslen = SCSI_SENSE_BUFFERSIZE;

                memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
        }

        cmnd->resid = 0;
        if (resp_info & RESID_UNDER) {
                cmnd->resid = be32_to_cpu(fcprsp->rspResId);

                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0716 FCP Read Underrun, expected %d, "
                                "residual %d Data: x%x x%x x%x\n", phba->brd_no,
                                be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
                                fcpi_parm, cmnd->cmnd[0], cmnd->underflow);

                /*
                 * The cmnd->underflow is the minimum number of bytes that must
                 * be transferred for this command.  Provided a sense condition
                 * is not present, make sure the actual amount transferred is
                 * at least the underflow value or fail.
                 */
                if (!(resp_info & SNS_LEN_VALID) &&
                    (scsi_status == SAM_STAT_GOOD) &&
                    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                        "%d:0717 FCP command x%x residual "
                                        "underrun converted to error "
                                        "Data: x%x x%x x%x\n", phba->brd_no,
                                        cmnd->cmnd[0], cmnd->request_bufflen,
                                        cmnd->resid, cmnd->underflow);

                        host_status = DID_ERROR;
                }
        } else if (resp_info & RESID_OVER) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0720 FCP command x%x residual "
                                "overrun error. Data: x%x x%x \n",
                                phba->brd_no, cmnd->cmnd[0],
                                cmnd->request_bufflen, cmnd->resid);
                host_status = DID_ERROR;

        /*
         * Check SLI validation that all the transfer was actually done
         * (fcpi_parm should be zero). Apply check only to reads.
         */
        } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
                   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0734 FCP Read Check Error Data: "
                                "x%x x%x x%x x%x\n", phba->brd_no,
                                be32_to_cpu(fcpcmd->fcpDl),
                                be32_to_cpu(fcprsp->rspResId),
                                fcpi_parm, cmnd->cmnd[0]);
                host_status = DID_ERROR;
                cmnd->resid = cmnd->request_bufflen;
        }

 out:
        cmnd->result = ScsiResult(host_status, scsi_status);
}
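
/*
 * ScsiResult() packs the host transport verdict and the SCSI status byte
 * into the single cmnd->result word the midlayer expects (host code in the
 * upper half, SCSI status in the low byte).
 */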

static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                        struct lpfc_iocbq *pIocbOut)
{
        struct lpfc_scsi_buf *lpfc_cmd =
                (struct lpfc_scsi_buf *) pIocbIn->context1;
        struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
        unsigned long iflag;

        lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
        lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

        if (lpfc_cmd->status) {
                if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
                    (lpfc_cmd->result & IOERR_DRVR_MASK))
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
                else if (lpfc_cmd->status >= IOSTAT_CNT)
                        lpfc_cmd->status = IOSTAT_DEFAULT;

                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0729 FCP cmd x%x failed <%d/%d> status: "
                                "x%x result: x%x Data: x%x x%x\n",
                                phba->brd_no, cmd->cmnd[0], cmd->device->id,
                                cmd->device->lun, lpfc_cmd->status,
                                lpfc_cmd->result, pIocbOut->iocb.ulpContext,
                                lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

                switch (lpfc_cmd->status) {
                case IOSTAT_FCP_RSP_ERROR:
                        /* Call FCP RSP handler to determine result */
                        lpfc_handle_fcp_err(lpfc_cmd);
                        break;
                case IOSTAT_NPORT_BSY:
                case IOSTAT_FABRIC_BSY:
                        cmd->result = ScsiResult(DID_BUS_BUSY, 0);
                        break;
                default:
                        cmd->result = ScsiResult(DID_ERROR, 0);
                        break;
                }

                if ((pnode == NULL)
                    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
                        cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
        } else {
                cmd->result = ScsiResult(DID_OK, 0);
        }

        if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
                uint32_t *lp = (uint32_t *)cmd->sense_buffer;

                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0710 Iodone <%d/%d> cmd %p, error x%x "
                                "SNS x%x x%x Data: x%x x%x\n",
                                phba->brd_no, cmd->device->id,
                                cmd->device->lun, cmd, cmd->result,
                                *lp, *(lp + 3), cmd->retries, cmd->resid);
        }

        cmd->scsi_done(cmd);

        spin_lock_irqsave(phba->host->host_lock, iflag);
        lpfc_release_scsi_buf(phba, lpfc_cmd);
        spin_unlock_irqrestore(phba->host->host_lock, iflag);
}
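
/*
 * The completion above is invoked from IOCB completion context without the
 * host_lock held, which is why returning the buffer to the free list is
 * wrapped in an explicit spin_lock_irqsave/spin_unlock_irqrestore pair.
 */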

static void
lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
                    struct lpfc_nodelist *pnode)
{
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
        int datadir = scsi_cmnd->sc_data_direction;

        lpfc_cmd->fcp_rsp->rspSnsLen = 0;
        /* clear task management bits */
        lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

        int_to_scsilun(lpfc_cmd->pCmd->device->lun,
                       &lpfc_cmd->fcp_cmnd->fcp_lun);

        memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

        if (scsi_cmnd->device->tagged_supported) {
                switch (scsi_cmnd->tag) {
                case HEAD_OF_QUEUE_TAG:
                        fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
                        break;
                case ORDERED_QUEUE_TAG:
                        fcp_cmnd->fcpCntl1 = ORDERED_Q;
                        break;
                default:
                        fcp_cmnd->fcpCntl1 = SIMPLE_Q;
                        break;
                }
        } else
                fcp_cmnd->fcpCntl1 = 0;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.  Start the lpfc command prep by
         * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
         * data bde entry.
         */
        if (scsi_cmnd->use_sg) {
                if (datadir == DMA_TO_DEVICE) {
                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
                        iocb_cmd->un.fcpi.fcpi_parm = 0;
                        iocb_cmd->ulpPU = 0;
                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
                        phba->fc4OutputRequests++;
                } else {
                        iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
                        iocb_cmd->ulpPU = PARM_READ_CHECK;
                        iocb_cmd->un.fcpi.fcpi_parm =
                                scsi_cmnd->request_bufflen;
                        fcp_cmnd->fcpCntl3 = READ_DATA;
                        phba->fc4InputRequests++;
                }
        } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
                if (datadir == DMA_TO_DEVICE) {
                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
                        iocb_cmd->un.fcpi.fcpi_parm = 0;
                        iocb_cmd->ulpPU = 0;
                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
                        phba->fc4OutputRequests++;
                } else {
                        iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
                        iocb_cmd->ulpPU = PARM_READ_CHECK;
                        iocb_cmd->un.fcpi.fcpi_parm =
                                scsi_cmnd->request_bufflen;
                        fcp_cmnd->fcpCntl3 = READ_DATA;
                        phba->fc4InputRequests++;
                }
        } else {
                iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
                iocb_cmd->un.fcpi.fcpi_parm = 0;
                iocb_cmd->ulpPU = 0;
                fcp_cmnd->fcpCntl3 = 0;
                phba->fc4ControlRequests++;
        }

        /*
         * Finish initializing those IOCB fields that are independent
         * of the scsi_cmnd request_buffer
         */
        piocbq->iocb.ulpContext = pnode->nlp_rpi;
        if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
                piocbq->iocb.ulpFCP2Rcvy = 1;

        piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
        piocbq->context1 = lpfc_cmd;
        piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
        piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
}
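
/*
 * For reads, ulpPU/fcpi_parm carry the expected transfer length
 * (PARM_READ_CHECK) so the adapter can report how much data actually
 * moved; lpfc_handle_fcp_err() keys its "FCP Read Check Error" test on
 * this value.
 */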

static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
                             struct lpfc_scsi_buf *lpfc_cmd,
                             uint8_t task_mgmt_cmd)
{
        struct lpfc_sli *psli;
        struct lpfc_iocbq *piocbq;
        IOCB_t *piocb;
        struct fcp_cmnd *fcp_cmnd;
        struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device;
        struct lpfc_rport_data *rdata = scsi_dev->hostdata;
        struct lpfc_nodelist *ndlp = rdata->pnode;

        if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
                return 0;
        }

        psli = &phba->sli;
        piocbq = &(lpfc_cmd->cur_iocbq);
        piocb = &piocbq->iocb;

        fcp_cmnd = lpfc_cmd->fcp_cmnd;
        int_to_scsilun(lpfc_cmd->pCmd->device->lun,
                       &lpfc_cmd->fcp_cmnd->fcp_lun);
        fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

        piocb->ulpCommand = CMD_FCP_ICMND64_CR;

        piocb->ulpContext = ndlp->nlp_rpi;
        if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
                piocb->ulpFCP2Rcvy = 1;
        }
        piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

        /* ulpTimeout is only one byte */
        if (lpfc_cmd->timeout > 0xff) {
                /*
                 * Do not timeout the command at the firmware level.
                 * The driver will provide the timeout mechanism.
                 */
                piocb->ulpTimeout = 0;
        } else {
                piocb->ulpTimeout = lpfc_cmd->timeout;
        }

        lpfc_cmd->rdata = rdata;

        switch (task_mgmt_cmd) {
        case FCP_LUN_RESET:
                /* Issue LUN Reset to TGT <num> LUN <num> */
                lpfc_printf_log(phba,
                                KERN_INFO,
                                LOG_FCP,
                                "%d:0703 Issue LUN Reset to TGT %d LUN %d "
                                "Data: x%x x%x\n",
                                phba->brd_no,
                                scsi_dev->id, scsi_dev->lun,
                                ndlp->nlp_rpi, ndlp->nlp_flag);
                break;
        case FCP_ABORT_TASK_SET:
                /* Issue Abort Task Set to TGT <num> LUN <num> */
                lpfc_printf_log(phba,
                                KERN_INFO,
                                LOG_FCP,
                                "%d:0701 Issue Abort Task Set to TGT %d LUN %d "
                                "Data: x%x x%x\n",
                                phba->brd_no,
                                scsi_dev->id, scsi_dev->lun,
                                ndlp->nlp_rpi, ndlp->nlp_flag);
                break;
        case FCP_TARGET_RESET:
                /* Issue Target Reset to TGT <num> */
                lpfc_printf_log(phba,
                                KERN_INFO,
                                LOG_FCP,
                                "%d:0702 Issue Target Reset to TGT %d "
                                "Data: x%x x%x\n",
                                phba->brd_no,
                                scsi_dev->id, ndlp->nlp_rpi,
                                ndlp->nlp_flag);
                break;
        }

        return (1);
}
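
/*
 * Task management requests built here carry their opcode in fcpCntl2.  A
 * nonzero fcpCntl2 is also how lpfc_handle_fcp_err() recognizes that no
 * scsi packet is attached to the lpfc_cmd (see above).
 */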

static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
{
        struct lpfc_iocbq *iocbq;
        struct lpfc_iocbq *iocbqrsp;
        int ret;

        ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
        if (!ret)
                return FAILED;

        lpfc_cmd->scsi_hba = phba;
        iocbq = &lpfc_cmd->cur_iocbq;
        iocbqrsp = lpfc_sli_get_iocbq(phba);

        if (!iocbqrsp)
                return FAILED;

        ret = lpfc_sli_issue_iocb_wait(phba,
                                       &phba->sli.ring[phba->sli.fcp_ring],
                                       iocbq, iocbqrsp, lpfc_cmd->timeout);
        if (ret != IOCB_SUCCESS) {
                lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
                ret = FAILED;
        } else {
                ret = SUCCESS;
                lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
                lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
                if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
                    (lpfc_cmd->result & IOERR_DRVR_MASK))
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
        }

        lpfc_sli_release_iocbq(phba, iocbqrsp);
        return ret;
}
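
/*
 * lpfc_sli_issue_iocb_wait() is the synchronous variant: it blocks until
 * the response IOCB arrives or lpfc_cmd->timeout expires, so the target
 * reset above completes before this routine returns.
 */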

const char *
lpfc_info(struct Scsi_Host *host)
{
        struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
        int len;
        static char lpfcinfobuf[384];

        memset(lpfcinfobuf,0,384);
        if (phba && phba->pcidev){
                strncpy(lpfcinfobuf, phba->ModelDesc, 256);
                len = strlen(lpfcinfobuf);
                snprintf(lpfcinfobuf + len,
                         384-len,
                         " on PCI bus %02x device %02x irq %d",
                         phba->pcidev->bus->number,
                         phba->pcidev->devfn,
                         phba->pcidev->irq);
                len = strlen(lpfcinfobuf);
                if (phba->Port[0]) {
                        snprintf(lpfcinfobuf + len,
                                 384-len,
                                 " port %s",
                                 phba->Port);
                }
        }
        return lpfcinfobuf;
}
*cmnd
, void (*done
) (struct scsi_cmnd
*))
699 struct lpfc_hba
*phba
=
700 (struct lpfc_hba
*) cmnd
->device
->host
->hostdata
[0];
701 struct lpfc_sli
*psli
= &phba
->sli
;
702 struct lpfc_rport_data
*rdata
= cmnd
->device
->hostdata
;
703 struct lpfc_nodelist
*ndlp
= rdata
->pnode
;
704 struct lpfc_scsi_buf
*lpfc_cmd
;
705 struct fc_rport
*rport
= starget_to_rport(scsi_target(cmnd
->device
));
708 err
= fc_remote_port_chkready(rport
);
711 goto out_fail_command
;
715 * Catch race where our node has transitioned, but the
716 * transport is still transitioning.
719 cmnd
->result
= ScsiResult(DID_BUS_BUSY
, 0);
720 goto out_fail_command
;
722 lpfc_cmd
= lpfc_sli_get_scsi_buf (phba
);
723 if (lpfc_cmd
== NULL
) {
724 printk(KERN_WARNING
"%s: No buffer available - list empty, "
725 "total count %d\n", __FUNCTION__
, phba
->total_scsi_bufs
);
730 * Store the midlayer's command structure for the completion phase
731 * and complete the command initialization.
733 lpfc_cmd
->pCmd
= cmnd
;
734 lpfc_cmd
->rdata
= rdata
;
735 lpfc_cmd
->timeout
= 0;
736 cmnd
->host_scribble
= (unsigned char *)lpfc_cmd
;
737 cmnd
->scsi_done
= done
;
739 err
= lpfc_scsi_prep_dma_buf(phba
, lpfc_cmd
);
741 goto out_host_busy_free_buf
;
743 lpfc_scsi_prep_cmnd(phba
, lpfc_cmd
, ndlp
);
745 err
= lpfc_sli_issue_iocb(phba
, &phba
->sli
.ring
[psli
->fcp_ring
],
746 &lpfc_cmd
->cur_iocbq
, SLI_IOCB_RET_IOCB
);
748 goto out_host_busy_free_buf
;
751 out_host_busy_free_buf
:
752 lpfc_release_scsi_buf(phba
, lpfc_cmd
);
753 cmnd
->host_scribble
= NULL
;
755 return SCSI_MLQUEUE_HOST_BUSY
;
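
/*
 * Return conventions above: 0 tells the midlayer the command was accepted
 * (completion arrives later via cmd->scsi_done), while
 * SCSI_MLQUEUE_HOST_BUSY asks it to retry the command once the host has
 * drained.
 */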

static int
__lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
        struct lpfc_hba *phba =
                (struct lpfc_hba *)cmnd->device->host->hostdata[0];
        struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
        struct lpfc_iocbq *iocb;
        struct lpfc_iocbq *abtsiocb;
        struct lpfc_scsi_buf *lpfc_cmd;
        IOCB_t *cmd, *icmd;
        unsigned int loop_count = 0;
        int ret = SUCCESS;

        lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
        BUG_ON(!lpfc_cmd);

        /*
         * If pCmd field of the corresponding lpfc_scsi_buf structure
         * points to a different SCSI command, then the driver has
         * already completed this command, but the midlayer did not
         * see the completion before the eh fired.  Just return
         * SUCCESS.
         */
        iocb = &lpfc_cmd->cur_iocbq;
        if (lpfc_cmd->pCmd != cmnd)
                goto out;

        BUG_ON(iocb->context1 != lpfc_cmd);

        abtsiocb = lpfc_sli_get_iocbq(phba);
        if (abtsiocb == NULL) {
                ret = FAILED;
                goto out;
        }

        /*
         * The scsi command can not be in txq and it is in flight because the
         * pCmd is still pointing at the SCSI command we have to abort.  There
         * is no need to search the txcmplq.  Just send an abort to the FW.
         */
        cmd = &iocb->iocb;
        icmd = &abtsiocb->iocb;
        icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
        icmd->un.acxri.abortContextTag = cmd->ulpContext;
        icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

        icmd->ulpLe = 1;
        icmd->ulpClass = cmd->ulpClass;
        if (phba->hba_state >= LPFC_LINK_UP)
                icmd->ulpCommand = CMD_ABORT_XRI_CN;
        else
                icmd->ulpCommand = CMD_CLOSE_XRI_CN;

        abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
        if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
                lpfc_sli_release_iocbq(phba, abtsiocb);
                ret = FAILED;
                goto out;
        }

        /* Wait for abort to complete */
        while (lpfc_cmd->pCmd == cmnd) {
                spin_unlock_irq(phba->host->host_lock);
                schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
                spin_lock_irq(phba->host->host_lock);
                if (++loop_count
                    > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
                        break;
        }

        if (lpfc_cmd->pCmd == cmnd) {
                ret = FAILED;
                lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                "%d:0748 abort handler timed out waiting for "
                                "abort to complete: ret %#x, ID %d, LUN %d, "
                                "snum %#lx\n",
                                phba->brd_no, ret, cmnd->device->id,
                                cmnd->device->lun, cmnd->serial_number);
        }

 out:
        lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                        "%d:0749 SCSI layer issued abort device: ret %#x, "
                        "ID %d, LUN %d, snum %#lx\n",
                        phba->brd_no, ret, cmnd->device->id,
                        cmnd->device->lun, cmnd->serial_number);

        return ret;
}

static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
        int rc;

        spin_lock_irq(cmnd->device->host->host_lock);
        rc = __lpfc_abort_handler(cmnd);
        spin_unlock_irq(cmnd->device->host->host_lock);
        return rc;
}

static int
__lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
        struct lpfc_scsi_buf *lpfc_cmd;
        struct lpfc_iocbq *iocbq, *iocbqrsp;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        int ret = FAILED;
        int cnt, loopcnt;

        /*
         * If target is not in a MAPPED state, delay the reset until
         * target is rediscovered or nodev timeout expires.
         */
        while (1) {
                if (!pnode)
                        break;

                if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
                        spin_unlock_irq(phba->host->host_lock);
                        schedule_timeout_uninterruptible(msecs_to_jiffies(500));
                        spin_lock_irq(phba->host->host_lock);
                }
                if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
                        break;
        }

        lpfc_cmd = lpfc_sli_get_scsi_buf(phba);
        if (lpfc_cmd == NULL)
                goto out;

        lpfc_cmd->pCmd = cmnd;
        lpfc_cmd->timeout = 60;
        lpfc_cmd->scsi_hba = phba;

        ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
        if (!ret)
                goto out_free_scsi_buf;

        iocbq = &lpfc_cmd->cur_iocbq;

        /* get a buffer for this IOCB command response */
        iocbqrsp = lpfc_sli_get_iocbq(phba);
        if (iocbqrsp == NULL)
                goto out_free_scsi_buf;

        ret = lpfc_sli_issue_iocb_wait(phba,
                                       &phba->sli.ring[phba->sli.fcp_ring],
                                       iocbq, iocbqrsp, lpfc_cmd->timeout);
        if (ret == IOCB_SUCCESS)
                ret = SUCCESS;

        lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
        lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
        if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT)
                if (lpfc_cmd->result & IOERR_DRVR_MASK)
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;

        /*
         * All outstanding txcmplq I/Os should have been aborted by the target.
         * Unfortunately, some targets do not abide by this forcing the driver
         * to double check.
         */
        lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
                            cmnd->device->id, cmnd->device->lun, 0,
                            LPFC_CTX_LUN);

        loopcnt = 0;
        while((cnt = lpfc_sli_sum_iocb(phba,
                                       &phba->sli.ring[phba->sli.fcp_ring],
                                       cmnd->device->id, cmnd->device->lun,
                                       LPFC_CTX_LUN))) {
                spin_unlock_irq(phba->host->host_lock);
                schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
                spin_lock_irq(phba->host->host_lock);

                if (++loopcnt
                    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
                        break;
        }

        if (cnt) {
                lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                        "%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
                        phba->brd_no, cnt);
                ret = FAILED;
        }

        lpfc_sli_release_iocbq(phba, iocbqrsp);

out_free_scsi_buf:
        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                        "%d:0713 SCSI layer issued LUN reset (%d, %d) "
                        "Data: x%x x%x x%x\n",
                        phba->brd_no, lpfc_cmd->pCmd->device->id,
                        lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status,
                        lpfc_cmd->result);
        lpfc_release_scsi_buf(phba, lpfc_cmd);
out:
        return ret;
}

static int
lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
        int rc;

        spin_lock_irq(cmnd->device->host->host_lock);
        rc = __lpfc_reset_lun_handler(cmnd);
        spin_unlock_irq(cmnd->device->host->host_lock);
        return rc;
}

/*
 * Note: midlayer calls this function with the host_lock held
 */
static int
__lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
        struct lpfc_nodelist *ndlp = NULL;
        int match;
        int ret = FAILED, i, err_count = 0;
        int cnt, loopcnt;
        unsigned int midlayer_id = 0;
        struct lpfc_scsi_buf * lpfc_cmd;

        lpfc_cmd = lpfc_sli_get_scsi_buf(phba);
        if (lpfc_cmd == NULL)
                goto out;

        /* The lpfc_cmd storage is reused.  Set all loop invariants. */
        lpfc_cmd->timeout = 60;
        lpfc_cmd->pCmd = cmnd;
        lpfc_cmd->scsi_hba = phba;

        /*
         * Since the driver manages a single bus device, reset all
         * targets known to the driver.  Should any target reset
         * fail, this routine returns failure to the midlayer.
         */
        midlayer_id = cmnd->device->id;
        for (i = 0; i < MAX_FCP_TARGET; i++) {
                /* Search the mapped list for this target ID */
                match = 0;
                list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
                        if ((i == ndlp->nlp_sid) && ndlp->rport) {
                                match = 1;
                                break;
                        }
                }
                if (!match)
                        continue;

                lpfc_cmd->pCmd->device->id = i;
                lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
                ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
                if (ret != SUCCESS) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0713 Bus Reset on target %d failed\n",
                                phba->brd_no, i);
                        err_count++;
                }
        }

        cmnd->device->id = midlayer_id;
        loopcnt = 0;
        while((cnt = lpfc_sli_sum_iocb(phba,
                                       &phba->sli.ring[phba->sli.fcp_ring],
                                       0, 0, LPFC_CTX_HOST))) {
                spin_unlock_irq(phba->host->host_lock);
                schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
                spin_lock_irq(phba->host->host_lock);

                if (++loopcnt
                    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
                        break;
        }

        if (cnt) {
                /* flush all outstanding commands on the host */
                i = lpfc_sli_abort_iocb(phba,
                                &phba->sli.ring[phba->sli.fcp_ring], 0, 0, 0,
                                LPFC_CTX_HOST);

                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                   "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
                   phba->brd_no, cnt, i);
        }

        if (!err_count)
                ret = SUCCESS;

        lpfc_release_scsi_buf(phba, lpfc_cmd);
        lpfc_printf_log(phba,
                        KERN_ERR,
                        LOG_FCP,
                        "%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
                        phba->brd_no, ret);
out:
        return ret;
}

static int
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
        int rc;

        spin_lock_irq(cmnd->device->host->host_lock);
        rc = __lpfc_reset_bus_handler(cmnd);
        spin_unlock_irq(cmnd->device->host->host_lock);
        return rc;
}

static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
        struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0];
        struct lpfc_scsi_buf *scsi_buf = NULL;
        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
        uint32_t total = 0, i;
        uint32_t num_to_alloc = 0;
        unsigned long flags;

        if (!rport || fc_remote_port_chkready(rport))
                return -ENXIO;

        sdev->hostdata = rport->dd_data;

        /*
         * Populate the cmds_per_lun count scsi_bufs into this host's globally
         * available list of scsi buffers.  Don't allocate more than the
         * HBA limit conveyed to the midlayer via the host structure.  The
         * formula accounts for the lun_queue_depth + error handlers + 1
         * extra.  This list of scsi bufs exists for the lifetime of the driver.
         */
        total = phba->total_scsi_bufs;
        num_to_alloc = phba->cfg_lun_queue_depth + 2;
        if (total >= phba->cfg_hba_queue_depth) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0704 At limitation of %d preallocated "
                                "command buffers\n", phba->brd_no, total);
                return 0;
        } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0705 Allocation request of %d command "
                                "buffers will exceed max of %d.  Reducing "
                                "allocation request to %d.\n", phba->brd_no,
                                num_to_alloc, phba->cfg_hba_queue_depth,
                                (phba->cfg_hba_queue_depth - total));
                num_to_alloc = phba->cfg_hba_queue_depth - total;
        }

        for (i = 0; i < num_to_alloc; i++) {
                scsi_buf = lpfc_new_scsi_buf(phba);
                if (!scsi_buf) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "%d:0706 Failed to allocate command "
                                        "buffer\n", phba->brd_no);
                        break;
                }

                spin_lock_irqsave(phba->host->host_lock, flags);
                phba->total_scsi_bufs++;
                list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
                spin_unlock_irqrestore(phba->host->host_lock, flags);
        }
        return 0;
}
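
/*
 * Each new scsi_device thus tries to grow the shared pool by
 * cfg_lun_queue_depth + 2 buffers (queue depth plus error-handling slack),
 * clamped so the total never exceeds cfg_hba_queue_depth.  Allocation runs
 * unlocked; only the list insertion takes the host_lock.
 */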

static int
lpfc_slave_configure(struct scsi_device *sdev)
{
        struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata[0];
        struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

        if (sdev->tagged_supported)
                scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
        else
                scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

        /*
         * Initialize the fc transport attributes for the target
         * containing this scsi device.  Also note that the driver's
         * target pointer is stored in the starget_data for the
         * driver's sysfs entry point functions.
         */
        rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;

        return 0;
}
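
/*
 * The +5 pad on dev_loss_tmo is presumably there so the transport's
 * remote-port timer fires a little after the driver's own nodev timeout
 * handling rather than racing it.
 */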

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
        sdev->hostdata = NULL;
        return;
}

struct scsi_host_template lpfc_template = {
        .module                 = THIS_MODULE,
        .name                   = LPFC_DRIVER_NAME,
        .info                   = lpfc_info,
        .queuecommand           = lpfc_queuecommand,
        .eh_abort_handler       = lpfc_abort_handler,
        .eh_device_reset_handler= lpfc_reset_lun_handler,
        .eh_bus_reset_handler   = lpfc_reset_bus_handler,
        .slave_alloc            = lpfc_slave_alloc,
        .slave_configure        = lpfc_slave_configure,
        .slave_destroy          = lpfc_slave_destroy,
        .this_id                = -1,
        .sg_tablesize           = LPFC_SG_SEG_CNT,
        .cmd_per_lun            = LPFC_CMD_PER_LUN,
        .use_clustering         = ENABLE_CLUSTERING,
        .shost_attrs            = lpfc_host_attrs,
        .max_sectors            = 0xFFFF,
};