/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for
 * Fibre Channel Host Bus Adapters.
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * Portions Copyright (C) 2004-2005 Christoph Hellwig
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General
 * Public License as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD
 * TO BE LEGALLY INVALID.  See the GNU General Public License for
 * more details, a copy of which can be found in the file COPYING
 * included with this package.
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are set up in the BPL
 * and the BPL BDE is set up in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;
	uint16_t iotag;

	psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;
	memset(psb, 0, sizeof (struct lpfc_scsi_buf));

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree(psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
						sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}

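/*
 * Remove a pre-allocated scsi buffer from the head of the HBA's free list,
 * under the scsi_buf_list_lock, and reset its per-command fields.  Returns
 * NULL if the list is empty.
 */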
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}

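/*
 * Return a scsi buffer to the tail of the HBA's free list so it can be
 * reused by a later command.
 */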
static void
lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

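/*
 * Map the data buffer(s) of a midlayer SCSI command for DMA and build the
 * corresponding BDEs in the buffer's BPL.  Returns zero on success and
 * non-zero if the mapping fails or exceeds the supported segment count.
 */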
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t i, num_bde = 0;
	int datadir = scsi_cmnd->sc_data_direction;
	int dma_error;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_cmnd->use_sg) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
		lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
						scsi_cmnd->use_sg, datadir);
		if (lpfc_cmd->seg_cnt == 0)
			return 1;

		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			dma_unmap_sg(&phba->pcidev->dev, sgel,
				     lpfc_cmd->seg_cnt, datadir);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 */
		for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			sgel++;
			num_bde++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		physaddr = dma_map_single(&phba->pcidev->dev,
					  scsi_cmnd->request_buffer,
					  scsi_cmnd->request_bufflen,
					  datadir);
		dma_error = dma_mapping_error(physaddr);
		if (dma_error) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0718 Unable to dma_map_single "
					"request_buffer: x%x\n",
					phba->brd_no, dma_error);
			return 1;
		}

		lpfc_cmd->nonsg_phys = physaddr;
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = 0;
		else
			bpl->tus.f.bdeFlags = BUFF_USE_RCV;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		num_bde = 1;
		bpl++;
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
	return 0;
}

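/*
 * Undo the DMA mappings created by lpfc_scsi_prep_dma_buf for either the
 * scatter-gather or the single-buffer case.
 */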
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
		dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
				psb->seg_cnt, psb->pCmd->sc_data_direction);
	} else {
		if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
			dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
					 psb->pCmd->request_bufflen,
					 psb->pCmd->sc_data_direction);
		}
	}
}

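/*
 * Examine the FCP RSP payload of a failed command, copy back any sense
 * data, check residual counts for underrun/overrun, and translate the
 * result into a SCSI midlayer status.
 */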
static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_log(phba, KERN_WARNING, logit,
			"%d:0730 FCP command x%x failed: x%x SNS x%x x%x "
			"Data: x%x x%x x%x x%x x%x\n",
			phba->brd_no, cmnd->cmnd[0], scsi_status,
			be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			be32_to_cpu(fcprsp->rspResId),
			be32_to_cpu(fcprsp->rspSnsLen),
			be32_to_cpu(fcprsp->rspRspLen),
			fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	cmnd->resid = 0;
	if (resp_info & RESID_UNDER) {
		cmnd->resid = be32_to_cpu(fcprsp->rspResId);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0716 FCP Read Underrun, expected %d, "
				"residual %d Data: x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
				fcpi_parm, cmnd->cmnd[0], cmnd->underflow);

		/*
		 * If there is an under run check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		    fcpi_parm &&
		    (cmnd->resid != fcpi_parm)) {
			lpfc_printf_log(phba, KERN_WARNING,
				LOG_FCP | LOG_FCP_ERROR,
				"%d:0735 FCP Read Check Error and Underrun "
				"Data: x%x x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl),
				cmnd->resid,
				fcpi_parm, cmnd->cmnd[0]);
			cmnd->resid = cmnd->request_bufflen;
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is at
		 * least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
					"%d:0717 FCP command x%x residual "
					"underrun converted to error "
					"Data: x%x x%x x%x\n", phba->brd_no,
					cmnd->cmnd[0], cmnd->request_bufflen,
					cmnd->resid, cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0720 FCP command x%x residual "
				"overrun error. Data: x%x x%x \n",
				phba->brd_no, cmnd->cmnd[0],
				cmnd->request_bufflen, cmnd->resid);
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				"%d:0734 FCP Read Check Error Data: "
				"x%x x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl),
				be32_to_cpu(fcprsp->rspResId),
				fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		cmnd->resid = cmnd->request_bufflen;
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}

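/*
 * IOCB completion handler for FCP commands: records the completion status,
 * maps it to a midlayer result, releases DMA resources, completes the
 * command, and adjusts LUN queue depths on queue-full or ramp-up conditions.
 */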
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0729 FCP cmd x%x failed <%d/%d> status: "
				"x%x result: x%x Data: x%x x%x\n",
				phba->brd_no, cmd->cmnd[0], cmd->device->id,
				cmd->device->lun, lpfc_cmd->status,
				lpfc_cmd->result, pIocbOut->iocb.ulpContext,
				lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if ((pnode == NULL)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0710 Iodone <%d/%d> cmd %p, error x%x "
				"SNS x%x x%x Data: x%x x%x\n",
				phba->brd_no, cmd->device->id,
				cmd->device->lun, cmd, cmd->result,
				*lp, *(lp + 3), cmd->retries, cmd->resid);
	}

	result = cmd->result;
	sdev = cmd->device;
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result && pnode != NULL &&
	    ((jiffies - pnode->last_ramp_up_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    ((jiffies - pnode->last_q_full_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    (phba->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (phba->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
							MSG_ORDERED_TAG,
							tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
							MSG_SIMPLE_TAG,
							tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
	}

	/*
	 * Check for queue full.  If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
					tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0711 detected queue full - lun queue depth "
				" adjusted to %d.\n", phba->brd_no, depth);
		}
	}

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

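/*
 * Build the FCP_CMND payload and the FCP IOCB for a midlayer SCSI command,
 * selecting read, write, or control IOCB variants based on the data
 * direction and buffer type.
 */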
static void
lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_cmnd->device->tagged_supported) {
		switch (scsi_cmnd->tag) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_cmnd->use_sg) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
}

static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_sli *psli;
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	psli = &phba->sli;
	piocbq = &(lpfc_cmd->cur_iocbq);
	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	return 1;
}

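/*
 * Default completion handler for task management IOCBs that time out;
 * simply releases the associated scsi buffer.
 */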
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

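/*
 * Issue an FCP target reset to the specified target and wait for the
 * response IOCB, recording the completion status in the scsi buffer.
 */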
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
		    unsigned tgt_id, unsigned int lun,
		    struct lpfc_rport_data *rdata)
{
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;

	if (!rdata->pnode)
		return FAILED;

	lpfc_cmd->rdata = rdata;
	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
					   FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	lpfc_cmd->scsi_hba = phba;
	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d:0702 Issue Target Reset to TGT %d "
			"Data: x%x x%x\n",
			phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
			rdata->pnode->nlp_flag);

	ret = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (ret != IOCB_SUCCESS) {
		if (ret == IOCB_TIMEDOUT)
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}

const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev){
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384-len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}

static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
				poll_tmo_expires);
}

void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
	unsigned long iflag;

	spin_lock_irqsave(phba->host->host_lock, iflag);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}

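/*
 * queuecommand entry point: allocate a scsi buffer, map the command for
 * DMA, build the FCP IOCB, and issue it on the FCP ring.  Busies the host
 * if no buffer is available or the issue fails.
 */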
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct lpfc_hba *phba =
		(struct lpfc_hba *) cmnd->device->host->hostdata;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
		goto out_fail_command;
	}
	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0707 driver's buffer pool is empty, "
				"IO busied\n", phba->brd_no);
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}

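/*
 * Block error handler entry points while the remote port is in the BLOCKED
 * state so that error recovery does not race with transport rediscovery.
 */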
static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	spin_lock_irq(shost->host_lock);
	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
		spin_unlock_irq(shost->host_lock);
		msleep(1000);
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
	return;
}

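/*
 * SCSI midlayer abort entry point: issue an ABORT_XRI/CLOSE_XRI IOCB for
 * the outstanding command and poll until the command completes or the
 * devloss window expires.
 */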
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	unsigned int loop_count = 0;
	int ret = SUCCESS;

	lpfc_block_error_handler(cmnd);
	spin_lock_irq(shost->host_lock);

	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired.  Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */
	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (phba->hba_state >= LPFC_LINK_UP)
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring(phba);

	/* Wait for abort to complete */
	while (lpfc_cmd->pCmd == cmnd)
	{
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_sli_poll_fcp_ring(phba);

		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);
		if (++loop_count
		    > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
			break;
	}

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d:0748 abort handler timed out waiting for "
				"abort to complete: ret %#x, ID %d, LUN %d, "
				"snum %#lx\n",
				phba->brd_no, ret, cmnd->device->id,
				cmnd->device->lun, cmnd->serial_number);
	}

 out:
	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0749 SCSI Layer I/O Abort Request "
			"Status x%x ID %d LUN %d snum %#lx\n",
			phba->brd_no, ret, cmnd->device->id,
			cmnd->device->lun, cmnd->serial_number);

	spin_unlock_irq(shost->host_lock);

	return ret;
}

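/*
 * SCSI midlayer device reset entry point: wait for the target node to be
 * mapped, send a target reset task management command, and flush any I/O
 * still outstanding for the LUN.
 */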
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	uint32_t cmd_result = 0, cmd_status = 0;
	int ret = FAILED;
	int iocb_status = IOCB_SUCCESS;
	int cnt, loopcnt;

	lpfc_block_error_handler(cmnd);
	spin_lock_irq(shost->host_lock);
	loopcnt = 0;
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or devloss timeout expires.
	 */
	while (1) {
		if (!pnode)
			goto out;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			spin_unlock_irq(phba->host->host_lock);
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			spin_lock_irq(phba->host->host_lock);
			loopcnt++;
			rdata = cmnd->device->hostdata;
			if (!rdata ||
			    (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0721 LUN Reset rport failure:"
					" cnt x%x rdata x%p\n",
					phba->brd_no, loopcnt, rdata);
				goto out;
			}
			pnode = rdata->pnode;
			if (!pnode)
				goto out;
		}
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			break;
	}

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->timeout = 60;
	lpfc_cmd->scsi_hba = phba;
	lpfc_cmd->rdata = rdata;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun,
					   FCP_TARGET_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d:0703 Issue target reset to TGT %d LUN %d rpi x%x "
			"nlp_flag x%x\n", phba->brd_no, cmnd->device->id,
			cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);

	iocb_status = lpfc_sli_issue_iocb_wait(phba,
				&phba->sli.ring[phba->sli.fcp_ring],
				iocbq, iocbqrsp, lpfc_cmd->timeout);

	if (iocb_status == IOCB_TIMEDOUT)
		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	if (iocb_status == IOCB_SUCCESS)
		ret = SUCCESS;
	else
		ret = iocb_status;

	cmd_result = iocbqrsp->iocb.un.ulpWord[4];
	cmd_status = iocbqrsp->iocb.ulpStatus;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the device.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
	 */
	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_LUN);
	if (cnt)
		lpfc_sli_abort_iocb(phba,
				    &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    0, LPFC_CTX_LUN);
	loopcnt = 0;
	while (cnt) {
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					cmnd->device->id, cmnd->device->lun,
					LPFC_CTX_LUN);
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0719 device reset I/O flush failure: cnt x%x\n",
			phba->brd_no, cnt);
		ret = FAILED;
	}

out_free_scsi_buf:
	if (iocb_status != IOCB_TIMEDOUT) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0713 SCSI layer issued device reset (%d, %d) "
			"return x%x status x%x result x%x\n",
			phba->brd_no, cmnd->device->id, cmnd->device->lun,
			ret, cmd_status, cmd_result);
out:
	spin_unlock_irq(shost->host_lock);
	return ret;
}

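/*
 * SCSI midlayer bus reset entry point: issue a target reset to every mapped
 * target known to the driver, then flush any I/O still outstanding on the
 * host.
 */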
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	struct lpfc_scsi_buf *lpfc_cmd;

	lpfc_block_error_handler(cmnd);
	spin_lock_irq(shost->host_lock);

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;
	lpfc_cmd->scsi_hba = phba;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    i == ndlp->nlp_sid &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		if (!match)
			continue;

		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun,
					  ndlp->rport->dd_data);
		if (ret != SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d:0700 Bus Reset on target %d failed\n",
				phba->brd_no, i);
			err_count++;
			break;
		}
	}

	if (ret != IOCB_TIMEDOUT)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	if (err_count == 0)
		ret = SUCCESS;
	else
		ret = FAILED;

	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets.  Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, 0, LPFC_CTX_HOST);
	loopcnt = 0;
	while (cnt) {
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					0, 0, LPFC_CTX_HOST);
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
		   "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
		   phba->brd_no, cnt, i);
		ret = FAILED;
	}

	lpfc_printf_log(phba,
			KERN_ERR,
			LOG_FCP,
			"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
			phba->brd_no, ret);
out:
	spin_unlock_irq(shost->host_lock);
	return ret;
}

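/*
 * slave_alloc entry point: bind the scsi_device to its remote port data and
 * grow the global pool of pre-allocated scsi buffers, bounded by the HBA
 * queue depth.
 */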
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = phba->cfg_lun_queue_depth + 2;
	if (total >= phba->cfg_hba_queue_depth) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0704 At limitation of %d preallocated "
				"command buffers\n", phba->brd_no, total);
		return 0;
	} else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0705 Allocation request of %d command "
				"buffers will exceed max of %d.  Reducing "
				"allocation request to %d.\n", phba->brd_no,
				num_to_alloc, phba->cfg_hba_queue_depth,
				(phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(phba);
		if (!scsi_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0706 Failed to allocate command "
					"buffer\n", phba->brd_no);
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}

static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata;
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = phba->cfg_devloss_tmo;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.scan_start		= lpfc_scan_start,
	.this_id		= -1,
	.sg_tablesize		= LPFC_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_host_attrs,
	.max_sectors		= 0xFFFF,
};