/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;
	uint16_t iotag;

	psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;
	memset(psb, 0, sizeof (struct lpfc_scsi_buf));
	psb->scsi_hba = phba;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
							&psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree(psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
							sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}
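
/*
 * Take a scsi buffer from the hba's free list, if one is available.
 * Returns NULL when the list is empty; callers treat that as a
 * temporary out-of-resources condition and busy the I/O.
 */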
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}
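
/*
 * Return a scsi buffer to the hba's free list.  pCmd is cleared under
 * the list lock before the buffer becomes available for reuse.
 */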
static void
lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}
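
/*
 * Map the data buffer of a scsi command for DMA and build the BPL
 * entries describing it: one bde per scatter-gather element, or a
 * single bde for a flat request_buffer.  Returns 0 on success, 1 if
 * the mapping fails.
 */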
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t i, num_bde = 0;
	int datadir = scsi_cmnd->sc_data_direction;
	int dma_error;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_cmnd->use_sg) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
		lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
						scsi_cmnd->use_sg, datadir);
		if (lpfc_cmd->seg_cnt == 0)
			return 1;

		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			dma_unmap_sg(&phba->pcidev->dev, sgel,
				     lpfc_cmd->seg_cnt, datadir);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 */
		for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			sgel++;
			num_bde++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		physaddr = dma_map_single(&phba->pcidev->dev,
					  scsi_cmnd->request_buffer,
					  scsi_cmnd->request_bufflen,
					  datadir);
		dma_error = dma_mapping_error(physaddr);
		if (dma_error) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0718 Unable to dma_map_single "
					"request_buffer: x%x\n",
					phba->brd_no, dma_error);
			return 1;
		}

		lpfc_cmd->nonsg_phys = physaddr;
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = 0;
		else
			bpl->tus.f.bdeFlags = BUFF_USE_RCV;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		num_bde = 1;
		bpl++;
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;

	fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
	return 0;
}
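
/*
 * Undo the DMA mappings established by lpfc_scsi_prep_dma_buf.
 */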
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
		dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
				psb->seg_cnt, psb->pCmd->sc_data_direction);
	} else {
		if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
			dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
						psb->pCmd->request_bufflen,
						psb->pCmd->sc_data_direction);
		}
	}
}
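
/*
 * Translate an FCP RSP payload into a midlayer result: validate the
 * response length, copy any sense data, and convert residual
 * under/overruns and read-check mismatches into DID_ERROR as required.
 */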
static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
	uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0730 FCP command failed: RSP "
			"Data: x%x x%x x%x x%x x%x x%x\n",
			phba->brd_no, resp_info, scsi_status,
			be32_to_cpu(fcprsp->rspResId),
			be32_to_cpu(fcprsp->rspSnsLen),
			be32_to_cpu(fcprsp->rspRspLen),
			fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}

	cmnd->resid = 0;
	if (resp_info & RESID_UNDER) {
		cmnd->resid = be32_to_cpu(fcprsp->rspResId);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0716 FCP Read Underrun, expected %d, "
				"residual %d Data: x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
				fcpi_parm, cmnd->cmnd[0], cmnd->underflow);

		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
					"%d:0717 FCP command x%x residual "
					"underrun converted to error "
					"Data: x%x x%x x%x\n", phba->brd_no,
					cmnd->cmnd[0], cmnd->request_bufflen,
					cmnd->resid, cmnd->underflow);

			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0720 FCP command x%x residual "
				"overrun error. Data: x%x x%x \n",
				phba->brd_no, cmnd->cmnd[0],
				cmnd->request_bufflen, cmnd->resid);
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
			(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0734 FCP Read Check Error Data: "
				"x%x x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl),
				be32_to_cpu(fcprsp->rspResId),
				fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		cmnd->resid = cmnd->request_bufflen;
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}
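
/*
 * IOCB completion handler for FCP commands.  Maps the IOCB status to a
 * scsi result, completes the command to the midlayer, then adjusts the
 * lun queue depth: ramping it up after a quiet interval of successful
 * I/O, and backing it off when a target returns SAM_STAT_TASK_SET_FULL.
 */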
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0729 FCP cmd x%x failed <%d/%d> status: "
				"x%x result: x%x Data: x%x x%x\n",
				phba->brd_no, cmd->cmnd[0], cmd->device->id,
				cmd->device->lun, lpfc_cmd->status,
				lpfc_cmd->result, pIocbOut->iocb.ulpContext,
				lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(lpfc_cmd);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if ((pnode == NULL)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0710 Iodone <%d/%d> cmd %p, error x%x "
				"SNS x%x x%x Data: x%x x%x\n",
				phba->brd_no, cmd->device->id,
				cmd->device->lun, cmd, cmd->result,
				*lp, *(lp + 3), cmd->retries, cmd->resid);
	}

	result = cmd->result;
	sdev = cmd->device;
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result && pnode != NULL &&
	    ((jiffies - pnode->last_ramp_up_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    ((jiffies - pnode->last_q_full_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    (phba->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (phba->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_ORDERED_TAG,
						tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_SIMPLE_TAG,
						tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
	}

	/*
	 * Check for queue full.  If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
					tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0711 detected queue full - lun queue depth "
				" adjusted to %d.\n", phba->brd_no, depth);
		}
	}

	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
}
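
/*
 * Fill in the FCP CMND and the remaining IOCB fields for a midlayer
 * command: lun, cdb, task attributes, the read/write/control iocb
 * opcode, and the completion callback.
 */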
static void
lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_cmnd->device->tagged_supported) {
		switch (scsi_cmnd->tag) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_cmnd->use_sg) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
}
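
/*
 * Build a task management IOCB (e.g. lun or target reset) in an
 * existing lpfc_cmd.  Returns 1 on success, 0 when the node is not in
 * a MAPPED state.
 */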
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_sli *psli;
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	psli = &phba->sli;
	piocbq = &(lpfc_cmd->cur_iocbq);
	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	return 1;
}
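
/*
 * Issue an FCP target reset to tgt_id and wait synchronously for it to
 * complete, recording the IOCB status and result in lpfc_cmd.
 */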
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
		    unsigned tgt_id, unsigned int lun,
		    struct lpfc_rport_data *rdata)
{
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;

	lpfc_cmd->rdata = rdata;
	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
					   FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	lpfc_cmd->scsi_hba = phba;
	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);

	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d:0702 Issue Target Reset to TGT %d "
			"Data: x%x x%x\n",
			phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
			rdata->pnode->nlp_flag);

	ret = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (ret != IOCB_SUCCESS) {
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		ret = FAILED;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
			(lpfc_cmd->result & IOERR_DRVR_MASK))
				lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}

const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf,0,384);
	if (phba && phba->pcidev){
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			384-len,
			" on PCI bus %02x device %02x irq %d",
			phba->pcidev->bus->number,
			phba->pcidev->devfn,
			phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}
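
/*
 * Re-arm the FCP ring poll timer, but only while commands remain on
 * the txcmplq; an idle ring simply lets the timer expire.
 */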
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
	unsigned long iflag;

	spin_lock_irqsave(phba->host->host_lock, iflag);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring (phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}
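
/*
 * queuecommand entry point.  Validates the rport and node, takes a
 * scsi buffer from the free list, maps the data buffer, and issues the
 * FCP IOCB; any failure busies the I/O back to the midlayer.
 */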
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct lpfc_hba *phba =
		(struct lpfc_hba *) cmnd->device->host->hostdata;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
		goto out_fail_command;
	}
	lpfc_cmd = lpfc_get_scsi_buf (phba);
	if (lpfc_cmd == NULL) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0707 driver's buffer pool is empty, "
				"IO busied\n", phba->brd_no);
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}
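
/*
 * Sleep until the transport unblocks the rport, so the error handlers
 * below do not race with a blocked remote port.
 */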
static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	spin_lock_irq(shost->host_lock);
	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
		spin_unlock_irq(shost->host_lock);
		msleep(1000);
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
	return;
}
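
/*
 * eh_abort_handler entry point.  Issues an ABTS (or a close when the
 * link is down) for the outstanding IOCB, then polls for up to roughly
 * twice the devloss timeout for the completion handler to clear pCmd.
 */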
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	unsigned int loop_count = 0;
	int ret = SUCCESS;

	lpfc_block_error_handler(cmnd);
	spin_lock_irq(shost->host_lock);

	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired.  Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (phba->hba_state >= LPFC_LINK_UP)
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring (phba);

	/* Wait for abort to complete */
	while (lpfc_cmd->pCmd == cmnd) {
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_sli_poll_fcp_ring (phba);

		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);
		if (++loop_count
		    > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
			break;
	}

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d:0748 abort handler timed out waiting for "
				"abort to complete: ret %#x, ID %d, LUN %d, "
				"snum %#lx\n",
				phba->brd_no, ret, cmnd->device->id,
				cmnd->device->lun, cmnd->serial_number);
	}

 out:
	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0749 SCSI Layer I/O Abort Request "
			"Status x%x ID %d LUN %d snum %#lx\n",
			phba->brd_no, ret, cmnd->device->id,
			cmnd->device->lun, cmnd->serial_number);

	spin_unlock_irq(shost->host_lock);

	return ret;
}
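
/*
 * eh_device_reset_handler entry point.  Sends an FCP lun reset to the
 * target, then flushes any I/O the target failed to abort on its own.
 */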
static int
lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	uint32_t cmd_result = 0, cmd_status = 0;
	int ret = FAILED;
	int cnt, loopcnt;

	lpfc_block_error_handler(cmnd);
	spin_lock_irq(shost->host_lock);
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or devloss timeout expires.
	 */
	while (1) {
		if (!pnode)
			goto out;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			spin_unlock_irq(phba->host->host_lock);
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			spin_lock_irq(phba->host->host_lock);
		}
		if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
			break;
	}

	lpfc_cmd = lpfc_get_scsi_buf (phba);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->timeout = 60;
	lpfc_cmd->scsi_hba = phba;
	lpfc_cmd->rdata = rdata;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun,
					   FCP_LUN_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d:0703 Issue LUN Reset to TGT %d LUN %d "
			"Data: x%x x%x\n", phba->brd_no, cmnd->device->id,
			cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);

	ret = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (ret == IOCB_SUCCESS)
		ret = SUCCESS;

	cmd_result = iocbqrsp->iocb.un.ulpWord[4];
	cmd_status = iocbqrsp->iocb.ulpStatus;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the device.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
	 */
	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_LUN);
	if (cnt)
		lpfc_sli_abort_iocb(phba,
				    &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    0, LPFC_CTX_LUN);
	loopcnt = 0;
	while (cnt) {
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					cmnd->device->id, cmnd->device->lun,
					LPFC_CTX_LUN);
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
			phba->brd_no, cnt);
		ret = FAILED;
	}

out_free_scsi_buf:
	lpfc_release_scsi_buf(phba, lpfc_cmd);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0713 SCSI layer issued LUN reset (%d, %d) "
			"Data: x%x x%x x%x\n",
			phba->brd_no, cmnd->device->id, cmnd->device->lun,
			ret, cmd_status, cmd_result);

out:
	spin_unlock_irq(shost->host_lock);
	return ret;
}
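
/*
 * eh_bus_reset_handler entry point.  Issues a target reset to every
 * mapped target known to the driver, then flushes whatever I/O is
 * still outstanding on the host.
 */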
static int
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	struct lpfc_scsi_buf *lpfc_cmd;

	lpfc_block_error_handler(cmnd);
	spin_lock_irq(shost->host_lock);

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;
	lpfc_cmd->scsi_hba = phba;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search the mapped list for this target ID */
		match = 0;
		list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
			if ((i == ndlp->nlp_sid) && ndlp->rport) {
				match = 1;
				break;
			}
		}
		if (!match)
			continue;

		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun,
					  ndlp->rport->dd_data);
		if (ret != SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d:0700 Bus Reset on target %d failed\n",
				phba->brd_no, i);
			err_count++;
		}
	}

	if (err_count == 0)
		ret = SUCCESS;

	lpfc_release_scsi_buf(phba, lpfc_cmd);

	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets.  Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, 0, LPFC_CTX_HOST);
	loopcnt = 0;
	while (cnt) {
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					0, 0, LPFC_CTX_HOST);
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
		   "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
		   phba->brd_no, cnt, i);
		ret = FAILED;
	}

	lpfc_printf_log(phba,
			KERN_ERR,
			LOG_FCP,
			"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
			phba->brd_no, ret);
out:
	spin_unlock_irq(shost->host_lock);
	return ret;
}
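
/*
 * slave_alloc entry point.  Ties the scsi_device to its rport data and
 * grows the global scsi buffer pool for the new lun, bounded by the
 * HBA queue depth.
 */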
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = phba->cfg_lun_queue_depth + 2;
	if (total >= phba->cfg_hba_queue_depth) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0704 At limitation of %d preallocated "
				"command buffers\n", phba->brd_no, total);
		return 0;
	} else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0705 Allocation request of %d command "
				"buffers will exceed max of %d.  Reducing "
				"allocation request to %d.\n", phba->brd_no,
				num_to_alloc, phba->cfg_hba_queue_depth,
				(phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(phba);
		if (!scsi_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0706 Failed to allocate command "
					"buffer\n", phba->brd_no);
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}

static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata;
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = phba->cfg_devloss_tmo;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_reset_lun_handler,
	.eh_bus_reset_handler	= lpfc_reset_bus_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.this_id		= -1,
	.sg_tablesize		= LPFC_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_host_attrs,
	.max_sectors		= 0xFFFF,
};