/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);

	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */

	return 0;
}
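
/*
 * Editor's note: a minimal caller sketch (not part of the driver) showing
 * the locking contract and full-queue handling around lpfc_sli4_wq_put().
 * The function name and parameters here are hypothetical; a real caller,
 * such as the iocb submit path, builds the WQE from an iocb first.
 */
#if 0
static int example_post_wqe(struct lpfc_hba *phba, struct lpfc_queue *wq,
			    union lpfc_wqe *wqe)
{
	unsigned long iflags;
	int rc;

	/* hbalock must be held across the put, per the routine's contract */
	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = lpfc_sli4_wq_put(wq, wqe);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return rc; /* 0 on success, -ENOMEM when the queue is full */
}
#endif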
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
	return 0;
}
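
/*
 * Editor's note: lpfc_sli4_mq_put() stashes the posted entry in
 * q->phba->mbox and lpfc_sli4_mq_release() below clears it, so the driver
 * tracks exactly one outstanding mailbox command on the MQ at a time and
 * the completion path can locate the active mailbox through that pointer.
 */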
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return eqe;
}
/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
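
/*
 * Editor's note: a hedged sketch (not driver code) of the consume/release
 * pattern the two routines above support: drain all valid EQEs, then pop
 * them back to the HBA with a single doorbell write that also re-arms the
 * queue. The handler callback is a stand-in for the real event dispatch.
 */
#if 0
static void example_poll_eq(struct lpfc_queue *eq,
			    void (*handle_eqe)(struct lpfc_eqe *))
{
	struct lpfc_eqe *eqe;

	while ((eqe = lpfc_sli4_eq_get(eq)) != NULL)
		handle_eqe(eqe);	/* caller-supplied handler */
	/* one doorbell write covers every entry consumed above */
	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
}
#endif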
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return cqe;
}
/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entry on @hq and @dq. This function will then ring the Receive Queue
 * Doorbell to signal the HBA to start processing the Receive Queue Entry.
 * This function returns the index that the rqe was copied to if successful.
 * If no entries are available on @hq then this function will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
	struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
	struct lpfc_register doorbell;
	int put_index = hq->host_index;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
		doorbell.word0 = 0;
		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
		       LPFC_RQ_POST_BATCH);
		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
	}
	return put_index;
}
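
/*
 * Editor's note: the RQ doorbell above is intentionally rung only once per
 * LPFC_RQ_POST_BATCH buffers rather than once per RQE; batching the MMIO
 * writes amortizes their cost when receive buffers are replenished in bulk
 * (see lpfc_sli_hbq_to_firmware_s4(), which posts through this routine).
 */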
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->cmdringaddr) +
			   pring->cmdidx * phba->iocb_cmd_size);
}
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->rspringaddr) +
			   pring->rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	uint16_t adj_xri;
	struct lpfc_sglq *sglq;

	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
		return NULL;
	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
	return sglq;
}
/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	uint16_t adj_xri;
	struct lpfc_sglq *sglq;

	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
		return NULL;
	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
	return sglq;
}
/**
 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	uint16_t adj_xri;

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
	if (sglq) {
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else {
			sglq->state = SGL_FREED;
			list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);

			/* Check if TXQ queue needs to be serviced */
			if (pring->txq_cnt)
				lpfc_worker_wake_up(phba);
		}
	}

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);

		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}
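
/*
 * Editor's note: a sketch of the typical error-path use of
 * lpfc_sli_cancel_iocbs() (mirroring the driver's ring-abort handling):
 * splice the pending txq onto a local list under hbalock, then flush
 * everything back to the upper layer with a local-reject status.
 */
#if 0
static void example_flush_txq(struct lpfc_hba *phba,
			      struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);

	spin_lock_irq(&phba->hbalock);
	list_splice_init(&pring->txq, &completions);
	pring->txq_cnt = 0;
	spin_unlock_irq(&phba->hbalock);

	/* completes each iocb with LOCAL_REJECT/SLI_ABORTED */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
#endif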
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_LA:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
				__func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
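
/*
 * Editor's note: a hedged sketch (not driver code) of how a ring event
 * handler can use the type returned above to choose a disposition; the
 * real dispatch lives in the ring-event and process-iocb routines.
 */
#if 0
static void example_dispatch(struct lpfc_hba *phba, uint8_t ulp_command)
{
	switch (lpfc_sli_iocb_cmd_type(ulp_command)) {
	case LPFC_SOL_IOCB:	/* match against txcmplq by iotag */
	case LPFC_UNSOL_IOCB:	/* hand to a registered unsol handler */
	case LPFC_ABORT_IOCB:	/* complete the aborted command */
	case LPFC_UNKNOWN_IOCB:	/* log and drop */
		break;
	}
}
#endif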
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_Q;
	pring->txcmplq_cnt++;
	if (pring->txcmplq_cnt > pring->txcmplq_max)
		pring->txcmplq_max = pring->txcmplq_cnt;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		if (!piocb->vport)
			BUG();
		else
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies + HZ * (phba->fc_ratov << 1));
	}

	return 0;
}
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	if (cmd_iocb != NULL)
		pring->txq_cnt--;
	return cmd_iocb;
}
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->numCiocb;

	if ((pring->next_cmdidx == pring->cmdidx) &&
	    (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
int
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof (struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->cmdidx = pring->next_cmdidx;
	writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform firmware
 * that there are pending work to be done for this ring and requests an
 * interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (pring->txq_cnt &&
	    lpfc_is_link_up(phba) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is free slot
 * available for the HBQ it will return pointer to the next available
 * HBQ entry else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;
	uint32_t hbqno;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}
	/* Return all HBQ buffers that are in-fly */
	list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
				 list) {
		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
		list_del(&hbq_buf->dbuf.list);
		if (hbq_buf->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_buf);
		} else {
			hbqno = hbq_buf->tag >> 16;
			if (hbqno >= LPFC_MAX_HBQS)
				(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
					(phba, hbq_buf);
			else
				(phba->hbqs[hbqno].hbq_free_buffer)(phba,
								    hbq_buf);
		}
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function will return
 * pointer to the hbq entry if it successfully post the buffer
 * else it will return NULL.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero if
 * it successfully post the buffer else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}
/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			      &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = rc;
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* HBQ for the extra ring if needed */
static struct lpfc_hbq_init lpfc_extra_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_EXTRA_RING),
	.buffer_count = 0,
	.init_count = 0,
	.add_count = 5,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
	&lpfc_extra_hbq,
};
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);

	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				   (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}
/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ entries
 * successfully allocated.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->add_count);
}
/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from SLI initialization code path with
 * no lock held to post initial HBQ buffers to firmware. The
 * function returns the number of HBQ entries successfully allocated.
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->init_count);
}
/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 * @rb_list: Pointer to the hbq buffer list to take a buffer from.
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}
/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function is called with hbalock held. This function searches
 * for the hbq buffer associated with the given tag in the hbq buffer
 * list. If it finds the hbq buffer, it returns the hbq_buffer; otherwise
 * it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}
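
/*
 * Editor's note: HBQ buffer tags encode the HBQ number in the upper 16
 * bits (hbqno << 16 | buffer index; see lpfc_sli_hbqbuf_fill_hbqs() above),
 * which is why the lookup recovers the owning queue with "tag >> 16".
 */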
/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with hbalock held. This function gives back
 * the hbq buffer to firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}
/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_WRITE_WWN:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_REG_FCFI:
	case MBX_UNREG_FCFI:
	case MBX_REG_VFI:
	case MBX_UNREG_VFI:
	case MBX_INIT_VPI:
	case MBX_INIT_VFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
	case MBX_AUTH_PORT:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}
/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is completion handler function for mailbox commands issued from
 * lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up thread waiting on the wait queue pointed by context1
 * of the mailbox.
 **/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	wait_queue_head_t *pdone_q;
	unsigned long drvr_flag;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}
/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue a UREG_LOGIN to re-claim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
	    (phba->sli_rev == LPFC_SLI_REV4) &&
	    (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0))
		lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);

	/*
	 * If a REG_LOGIN succeeded after node is destroyed or node
	 * is in re-discovery driver need to cleanup the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
	    !(phba->pport->load_flag & FC_UNLOADING) &&
	    !pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->context2;
		lpfc_nlp_put(ndlp);
		pmb->context2 = NULL;
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}
1799 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
1800 * @phba: Pointer to HBA context object.
1802 * This function is called with no lock held. This function processes all
1803 * the completed mailbox commands and gives it to upper layers. The interrupt
1804 * service routine processes mailbox completion interrupt and adds completed
1805 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
1806 * Worker thread call lpfc_sli_handle_mb_event, which will return the
1807 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
1808 * function returns the mailbox commands to the upper layer by calling the
1809 * completion handler function of each mailbox.
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			} else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl:       cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * It is a fatal error if an unknown mbox command completion
		 * is received.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli4_mbox_opcode_get(phba, pmb));
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_SLI,
						"(%d):0305 Mbox cmd cmpl "
						"error - RETRYing Data: x%x "
						"(x%x) x%x x%x x%x\n",
						pmb->vport ? pmb->vport->vpi : 0,
						pmbox->mbxCommand,
						lpfc_sli4_mbox_opcode_get(phba,
									  pmb),
						pmbox->mbxStatus,
						pmbox->un.varWords[0],
						pmb->vport->port_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, pmb),
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	} while (1);
	return 0;
}
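/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * splice-then-process pattern used by lpfc_sli_handle_mb_event above.
 * Completions are detached from the shared queue in one O(1) operation
 * while the lock is held, then processed without the lock, so completion
 * handlers may themselves take the lock or issue new commands. The names
 * below are hypothetical stand-ins; this is a userspace model, not the
 * driver's code, and is fenced out of compilation.
 */
#if 0
#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct fake_mbox {
	struct fake_mbox *next;
	int cmd;
};

static struct fake_mbox *mboxq_cmpl;	/* shared, lock-protected list */
static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;

static void handle_mb_events(void)
{
	struct fake_mbox *cmplq, *mb;

	/* Detach the whole completion list in O(1) under the lock */
	pthread_mutex_lock(&qlock);
	cmplq = mboxq_cmpl;
	mboxq_cmpl = NULL;
	pthread_mutex_unlock(&qlock);

	/* Run completion handlers without the lock held */
	while ((mb = cmplq) != NULL) {
		cmplq = mb->next;
		printf("completed mailbox cmd x%x\n", mb->cmd);
	}
}
#endif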
/**
 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
 * is set in the tag, the buffer is posted for a particular exchange and
 * the function returns the buffer without replacing it.
 * If the buffer is for unsolicited ELS or CT traffic, this function
 * returns the buffer and also posts another buffer to the firmware.
 **/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
		  struct lpfc_sli_ring *pring,
		  uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry;

	if (tag & QUE_BUFTAG_BIT)
		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_entry)
		return NULL;
	return &hbq_entry->dbuf;
}
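/*
 * Illustrative sketch (editor's addition): the tag demultiplexing used by
 * lpfc_sli_get_buff above. One tag word serves two buffer pools; a high
 * flag bit selects which lookup to perform, so a response IOCB needs only
 * a single 32-bit field to name its buffer. All names and the bit chosen
 * are hypothetical; this is not compiled into the driver.
 */
#if 0
#include <stdint.h>

#define DEMO_BUFTAG_BIT (1u << 31)	/* mirrors the role of QUE_BUFTAG_BIT */

static char tagged_pool[16];		/* exchange-specific buffers */
static char hbq_pool[16];		/* unsolicited-traffic buffers */

static void *demo_get_buff(uint32_t tag)
{
	if (tag & DEMO_BUFTAG_BIT)
		/* exchange-specific buffer: return without replacement */
		return &tagged_pool[tag & 0xf];
	/* unsolicited traffic buffer: found by tag in the HBQ-style pool */
	return &hbq_pool[tag & 0xf];
}
#endif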
/**
 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
 * @fch_type: the type for the first frame of the sequence.
 *
 * This function is called with no lock held. This function uses the r_ctl and
 * type of the received sequence to find the correct callback function to call
 * to process the sequence.
 **/
static int
lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
			 uint32_t fch_type)
{
	int i;

	/* unSolicited Responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
								  saveq);
		return 1;
	}

	/* We must search, based on rctl / type
	   for the right routine */
	for (i = 0; i < pring->num_mask; i++) {
		if ((pring->prt[i].rctl == fch_r_ctl) &&
		    (pring->prt[i].type == fch_type)) {
			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
					(phba, pring, saveq);
			return 1;
		}
	}
	return 0;
}
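/*
 * Illustrative sketch (editor's addition): the (rctl, type) dispatch
 * performed by lpfc_complete_unsol_iocb above. A small table maps the
 * first frame's R_CTL/TYPE pair to a handler; a non-zero return tells the
 * caller the sequence was consumed. The table contents and handler names
 * are hypothetical; this block is fenced out of compilation.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct demo_prt {
	uint32_t rctl;
	uint32_t type;
	void (*rcv_unsol)(uint32_t rctl, uint32_t type);
};

static void demo_rcv_els(uint32_t r, uint32_t t)
{
	printf("ELS handler: rctl x%x type x%x\n", r, t);
}

/* hypothetical values standing in for FC_RCTL_ELS_REQ / FC_TYPE_ELS */
static struct demo_prt prt[] = {
	{ 0x22, 0x01, demo_rcv_els },
};

/* Returns 1 if a handler consumed the sequence, 0 otherwise */
static int demo_complete_unsol(uint32_t fch_r_ctl, uint32_t fch_type)
{
	unsigned int i;

	for (i = 0; i < sizeof(prt) / sizeof(prt[0]); i++) {
		if (prt[i].rctl == fch_r_ctl && prt[i].type == fch_type) {
			if (prt[i].rcv_unsol)
				prt[i].rcv_unsol(fch_r_ctl, fch_type);
			return 1;
		}
	}
	return 0;	/* caller logs the unexpected Rctl/Type */
}
#endif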
/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held by the ring event handler
 * when there is an unsolicited iocb posted to the response ring by the
 * firmware. This function gets the buffer associated with the iocbs
 * and calls the event handler for the ring. This function handles both
 * qring buffers and hbq buffers.
 * When the function returns 1 the caller can free the iocb object, otherwise
 * upper layer functions will free the iocb objects.
 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t *irsp;
	WORD5 *w5p;
	uint32_t Rctl, Type;
	int found = 0;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	irsp = &(saveq->iocb);

	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}
		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}
		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}
		return 1;
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
		     irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}
/**
 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to response iocb object.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given response iocb using the iotag of the
 * response iocb. This function is called with the hbalock held.
 * This function returns the command iocb object if it finds the command
 * iocb, else it returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
		      struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;

	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		list_del_init(&cmd_iocb->list);
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
			pring->txcmplq_cnt--;
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
		}
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0317 iotag x%x is out of "
			"range: max iotag x%x wd0 x%x\n",
			iotag, phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}
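/*
 * Illustrative sketch (editor's addition): the O(1) iotag lookup used by
 * lpfc_sli_iocbq_lookup above. The iotag assigned at submission doubles
 * as a direct index into an array of command pointers, so matching a
 * response to its command needs no list walk. Names and sizes here are
 * hypothetical; the block is fenced out of compilation.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

#define DEMO_MAX_IOTAG 4096

struct demo_iocb { int busy; };

static struct demo_iocb *iocbq_lookup[DEMO_MAX_IOTAG + 1];
static uint16_t last_iotag;	/* highest tag handed out so far */

static struct demo_iocb *demo_lookup(uint16_t iotag)
{
	/* iotag 0 is reserved as "no tag"; reject out-of-range values */
	if (iotag != 0 && iotag <= last_iotag)
		return iocbq_lookup[iotag];
	return NULL;		/* caller logs the out-of-range tag */
}
#endif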
/**
 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iotag: IOCB tag.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given iotag. This function is called with the
 * hbalock held.
 * This function returns the command iocb object if it finds the command
 * iocb, else it returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring, uint16_t iotag)
{
	struct lpfc_iocbq *cmd_iocb;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		list_del_init(&cmd_iocb->list);
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
			pring->txcmplq_cnt--;
		}
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0372 iotag x%x is out of range: max iotag (x%x)\n",
			iotag, phba->sli.last_iotag);
	return NULL;
}
/**
 * lpfc_sli_process_sol_iocb - process solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * This function is called by the ring event handler for non-fcp
 * rings when there is a new response iocb in the response ring.
 * The caller is not required to hold any locks. This function
 * gets the command iocb associated with the response iocb and
 * calls the completion handler for the command iocb. If there
 * is no completion handler, the function will free the resources
 * associated with the command iocb. If the response iocb is for
 * an already aborted command iocb, the status of the completion
 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 * This function always returns 1.
 **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(&phba->hbalock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * If an ELS command failed send an event to mgmt
			 * application.
			 */
			if (saveq->iocb.ulpStatus &&
			    (pring->ringno == LPFC_ELS_RING) &&
			    (cmdiocbp->iocb.ulpCommand ==
			     CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
							    cmdiocbp, saveq);

			/*
			 * Post all ELS completions to the worker thread.
			 * All others are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if ((phba->sli_rev < LPFC_SLI_REV4) &&
				    (cmdiocbp->iocb_flag &
				     LPFC_DRIVER_ABORTED)) {
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/* Firmware could still be in progress
					 * of DMAing payload, so don't free data
					 * buffer till after a hbeat.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
				}
				if (phba->sli_rev == LPFC_SLI_REV4) {
					if (saveq->iocb_flag &
					    LPFC_EXCHANGE_BUSY) {
						/* Set cmdiocb flag for the
						 * exchange busy so sgl (xri)
						 * will not be released until
						 * the abort xri is received
						 * from hba.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag |=
							LPFC_EXCHANGE_BUSY;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
					if (cmdiocbp->iocb_flag &
					    LPFC_DRIVER_ABORTED) {
						/*
						 * Clear LPFC_DRIVER_ABORTED
						 * bit in case it was driver
						 * initiated abort.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag &=
							~LPFC_DRIVER_ABORTED;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
						cmdiocbp->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						cmdiocbp->iocb.un.ulpWord[4] =
							IOERR_ABORT_REQUESTED;
						/*
						 * For SLI4, irsiocb contains
						 * NO_XRI in sli_xritag, it
						 * shall not affect releasing
						 * sgl (xri) process.
						 */
						saveq->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						saveq->iocb.un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						saveq->iocb_flag |=
							LPFC_DELAY_MEM_FREE;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0322 Ring %d handler: "
					"unexpected completion IoTag x%x "
					"Data: x%x x%x x%x x%x\n",
					pring->ringno,
					saveq->iocb.ulpIoTag,
					saveq->iocb.ulpStatus,
					saveq->iocb.un.ulpWord[4],
					saveq->iocb.ulpCommand,
					saveq->iocb.ulpContext);
		}
	}

	return rc;
}
/**
 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called from the iocb ring event handlers when the
 * put pointer is ahead of the get pointer for a ring. This function signals
 * an error attention condition to the worker thread and the worker
 * thread will transition the HBA to offline state.
 **/
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	/*
	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
	 * rsp ring <portRspMax>
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0312 Ring %d handler: portRspPut %d "
			"is bigger than rsp ring %d\n",
			pring->ringno, le32_to_cpu(pgp->rspPutInx),
			pring->numRiocb);

	phba->link_state = LPFC_HBA_ERROR;

	/*
	 * All error attention handlers are posted to
	 * worker thread
	 */
	phba->work_ha |= HA_ERATT;
	phba->work_hs = HS_FFER3;

	lpfc_worker_wake_up(phba);
}
/**
 * lpfc_poll_eratt - Error attention polling timer timeout handler
 * @ptr: Pointer to address of HBA context object.
 *
 * This function is invoked by the Error Attention polling timer when the
 * timer times out. It will check the SLI Error Attention register for
 * possible attention events. If so, it will post an Error Attention event
 * and wake up the worker thread to process it. Otherwise, it will set up
 * the Error Attention polling timer for the next poll.
 **/
void lpfc_poll_eratt(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t eratt = 0;

	phba = (struct lpfc_hba *)ptr;

	/* Check chip HA register for error event */
	eratt = lpfc_sli_check_eratt(phba);

	if (eratt)
		/* Tell the worker thread there is work to do */
		lpfc_worker_wake_up(phba);
	else
		/* Restart the timer for next eratt poll */
		mod_timer(&phba->eratt_poll, jiffies +
					     HZ * LPFC_ERATT_POLL_INTERVAL);
}
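/*
 * Illustrative sketch (editor's addition): the self re-arming poll used
 * by lpfc_poll_eratt above, in the timer API of this kernel era. The
 * timer callback either hands work to the worker thread (and stops) or
 * re-arms itself for the next interval. The setup routine below is an
 * assumption about how such a timer is typically initialized; it is not
 * copied from this file and is fenced out of compilation.
 */
#if 0
#include <linux/timer.h>
#include <linux/jiffies.h>

#define DEMO_POLL_INTERVAL 5	/* seconds, like LPFC_ERATT_POLL_INTERVAL */

static struct timer_list demo_poll;
static int demo_check_hw(void) { return 0; }	/* stand-in for eratt check */

static void demo_poll_fn(unsigned long ptr)
{
	(void)ptr;
	if (demo_check_hw())
		;	/* would wake a worker thread here */
	else
		mod_timer(&demo_poll, jiffies + HZ * DEMO_POLL_INTERVAL);
}

static void demo_poll_start(void)
{
	init_timer(&demo_poll);
	demo_poll.function = demo_poll_fn;
	demo_poll.data = 0;
	mod_timer(&demo_poll, jiffies + HZ * DEMO_POLL_INTERVAL);
}
#endif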
/**
 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the interrupt context when there is a ring
 * event for the fcp ring. The caller does not hold any lock.
 * The function processes each response iocb in the response ring until it
 * finds an iocb with the LE bit set and chains all the iocbs up to the iocb
 * with the LE bit set. The function will call the completion handler of the
 * command iocb if the response iocb indicates a completion for a command
 * iocb or it is an abort completion. The function will call the
 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 **/
int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	}
	if (phba->fcp_ring_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	} else
		phba->fcp_ring_in_use = 1;

	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure.  The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = lpfc_resp_iocb(phba, pring);
		phba->last_completion_time = jiffies;

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		INIT_LIST_HEAD(&(rspiocbq.list));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/*
			 * If resource errors are reported from the HBA, reduce
			 * queuedepths of the SCSI device.
			 */
			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->lpfc_rampdown_queue_depth(phba);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}

			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0336 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(uint32_t *)&irsp->un1,
					*((uint32_t *)&irsp->un1 + 1));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"0333 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion\n",
						irsp->ulpCommand);
				break;
			}

			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			if (unlikely(!cmdiocbq))
				break;
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
			if (cmdiocbq->iocb_cmpl) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}
			break;
		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0334 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);
	}

	phba->fcp_ring_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
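/*
 * Illustrative sketch (editor's addition): the producer/consumer index
 * arithmetic behind the while (pring->rspidx != portRspPut) loop above.
 * The port advances the put index as it posts responses; the host chases
 * it with the get index, wrapping both modulo the ring size, and only
 * re-reads the put index when it catches up. Runnable userspace model
 * with hypothetical values; fenced out of compilation.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 8

static uint32_t ring[RING_SIZE];

static void consume(uint32_t *getidx, uint32_t putidx)
{
	while (*getidx != putidx) {
		uint32_t entry = ring[*getidx];

		if (++(*getidx) >= RING_SIZE)	/* wrap like rspidx */
			*getidx = 0;
		printf("consumed entry x%x, getidx now %u\n", entry, *getidx);
	}
}

int main(void)
{
	uint32_t getidx = 6, putidx = 2;	/* wrapped-around snapshot */

	ring[6] = 0xa; ring[7] = 0xb; ring[0] = 0xc; ring[1] = 0xd;
	consume(&getidx, putidx);	/* processes 6, 7, 0, 1 then stops */
	return 0;
}
#endif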
/**
 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to driver response IOCB object.
 *
 * This function is called from the worker thread when there is a slow-path
 * response IOCB to process. This function chains all the response iocbs until
 * seeing the iocb with the LE bit set. The function will call
 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
 * completion of a command iocb. The function will call the
 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
 * The function frees the resources or calls the completion handler if this
 * iocb is an abort completion. The function returns NULL when the response
 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
 * this function shall chain the iocb on to the iocb_continueq and return the
 * response iocb passed in.
 **/
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *rspiocbp)
{
	struct lpfc_iocbq *saveq;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *next_iocb;
	IOCB_t *irsp = NULL;
	uint32_t free_saveq;
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	unsigned long iflag;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* First add the response iocb to the continueq list */
	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
	pring->iocb_continueq_cnt++;

	/* Now, determine whether the list is completed for processing */
	irsp = &rspiocbp->iocb;
	if (irsp->ulpLe) {
		/*
		 * By default, the driver expects to free all resources
		 * associated with this iocb completion.
		 */
		free_saveq = 1;
		saveq = list_get_first(&pring->iocb_continueq,
				       struct lpfc_iocbq, list);
		irsp = &(saveq->iocb);
		list_del_init(&pring->iocb_continueq);
		pring->iocb_continueq_cnt = 0;

		pring->stats.iocb_rsp++;

		/*
		 * If resource errors are reported from the HBA, reduce
		 * queuedepths of the SCSI device.
		 */
		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			phba->lpfc_rampdown_queue_depth(phba);
			spin_lock_irqsave(&phba->hbalock, iflag);
		}

		if (irsp->ulpStatus) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0328 Rsp Ring %d error: "
					"IOCB Data: "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7),
					*(((uint32_t *) irsp) + 8),
					*(((uint32_t *) irsp) + 9),
					*(((uint32_t *) irsp) + 10),
					*(((uint32_t *) irsp) + 11),
					*(((uint32_t *) irsp) + 12),
					*(((uint32_t *) irsp) + 13),
					*(((uint32_t *) irsp) + 14),
					*(((uint32_t *) irsp) + 15));
		}

		/*
		 * Fetch the IOCB command type and call the correct completion
		 * routine. Solicited and Unsolicited IOCBs on the ELS ring
		 * get freed back to the lpfc_iocb_list by the discovery
		 * kernel thread.
		 */
		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
		switch (type) {
		case LPFC_SOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;

		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			if (!rc)
				free_saveq = 0;
			break;

		case LPFC_ABORT_IOCB:
			cmdiocbp = NULL;
			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
								 saveq);
			if (cmdiocbp) {
				/* Call the specified completion routine */
				if (cmdiocbp->iocb_cmpl) {
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
							      saveq);
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
				} else
					__lpfc_sli_release_iocbq(phba,
								 cmdiocbp);
			}
			break;

		case LPFC_UNKNOWN_IOCB:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *)irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0335 Unknown IOCB "
						"command Data: x%x "
						"x%x x%x x%x\n",
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		if (free_saveq) {
			list_for_each_entry_safe(rspiocbp, next_iocb,
						 &saveq->list, list) {
				list_del(&rspiocbp->list);
				__lpfc_sli_release_iocbq(phba, rspiocbp);
			}
			__lpfc_sli_release_iocbq(phba, saveq);
		}
		rspiocbp = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rspiocbp;
}
/**
 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This routine wraps the actual slow_ring event process routine from the
 * API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
}
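/*
 * Illustrative sketch (editor's addition): the API jump-table pattern the
 * wrapper above relies on. At attach time the driver points a per-object
 * function pointer at the SLI3 or SLI4 implementation, so hot-path code
 * makes one indirect call instead of branching on the revision. Names are
 * hypothetical; runnable userspace model, fenced out of compilation.
 */
#if 0
#include <stdio.h>

struct demo_hba {
	int sli_rev;
	void (*handle_slow_ring)(struct demo_hba *hba);
};

static void handle_slow_ring_s3(struct demo_hba *hba)
{
	printf("SLI3 slow ring handler\n");
}

static void handle_slow_ring_s4(struct demo_hba *hba)
{
	printf("SLI4 slow ring handler\n");
}

/* done once at init, mirroring the driver's api table setup */
static void demo_init_api(struct demo_hba *hba)
{
	hba->handle_slow_ring = (hba->sli_rev >= 4) ?
				handle_slow_ring_s4 : handle_slow_ring_s3;
}

int main(void)
{
	struct demo_hba hba = { .sli_rev = 4 };

	demo_init_api(&hba);
	hba.handle_slow_ring(&hba);	/* dispatches without branching */
	return 0;
}
#endif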
/**
 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a ring event
 * for non-fcp rings. The caller does not hold any lock. The function will
 * remove each response iocb in the response ring and call the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp;
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	uint32_t portRspPut, portRspMax;
	unsigned long iflag;
	uint32_t status;

	pgp = &phba->port_gp[pring->ringno];
	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0303 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				pring->ringno, portRspPut, portRspMax);

		phba->link_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return;
	}

	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = lpfc_resp_iocb(phba, pring);

		phba->last_completion_time = jiffies;
		rspiocbp = __lpfc_sli_get_iocbq(phba);
		if (rspiocbp == NULL) {
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __func__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbp->iocb;

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		if (pring->ringno == LPFC_ELS_RING) {
			lpfc_debugfs_slow_ring_trc(phba,
			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
				*(((uint32_t *) irsp) + 4),
				*(((uint32_t *) irsp) + 6),
				*(((uint32_t *) irsp) + 7));
		}

		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* Handle the response IOCB */
		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
		spin_lock_irqsave(&phba->hbalock, iflag);

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->rspidx != portRspPut) */

	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);
	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a pending
 * ELS response iocb on the driver internal slow-path response iocb worker
 * queue. The caller does not hold any lock. The function will remove each
 * response iocb from the response worker queue and call the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_iocbq *irspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irqsave(&phba->hbalock, iflag);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			/* Translate ELS WCQE to response IOCBQ */
			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
								   irspiocbq);
			if (irspiocbq)
				lpfc_sli_sp_handle_rspiocb(phba, pring,
							   irspiocbq);
			break;
		case CQE_CODE_RECEIVE:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_sli4_handle_received_buffer(phba, dmabuf);
			break;
		default:
			break;
		}
	}
}
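/*
 * Illustrative sketch (editor's addition): the container_of recovery used
 * in the s4 handler above. A generic cq_event is embedded in different
 * outer structures; the completion code reads the CQE code, then walks
 * back from the embedded member to the right container type. Runnable
 * userspace model with a local container_of; fenced out of compilation.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_cq_event { int code; };

struct demo_iocbq {
	int iotag;
	struct demo_cq_event cq_event;	/* embedded member */
};

int main(void)
{
	struct demo_iocbq iocbq = { .iotag = 42 };
	struct demo_cq_event *evt = &iocbq.cq_event;

	/* recover the outer object from the embedded event */
	struct demo_iocbq *back = demo_container_of(evt, struct demo_iocbq,
						    cq_event);
	printf("recovered iotag %d\n", back->iotag);
	return 0;
}
#endif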
/**
 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function aborts all iocbs in the given ring and frees all the iocb
 * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
 * the return of this function. The caller is not required to hold any locks.
 **/
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next_iocb;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_fabric_abort_hba(phba);
	}

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&pring->txq, &completions);
	pring->txq_cnt = 0;

	/* Next issue ABTS for everything on the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
		lpfc_sli_issue_abort_iotag(phba, pring, iocb);

	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
/**
 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all iocbs in the fcp ring and frees all the iocb
 * objects in txq and txcmplq. This function will not issue abort iocbs
 * for all the iocb commands in txcmplq; they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked with EEH when the device's PCI
 * slot has been permanently disabled.
 **/
void
lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
{
	LIST_HEAD(txq);
	LIST_HEAD(txcmplq);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	/* Currently, only one fcp ring */
	pring = &psli->ring[psli->fcp_ring];

	spin_lock_irq(&phba->hbalock);
	/* Retrieve everything on txq */
	list_splice_init(&pring->txq, &txq);
	pring->txq_cnt = 0;

	/* Retrieve everything on the txcmplq */
	list_splice_init(&pring->txcmplq, &txcmplq);
	pring->txcmplq_cnt = 0;
	spin_unlock_irq(&phba->hbalock);

	/* Flush the txq */
	lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	/* Flush the txcmpq */
	lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
}
/**
 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function reads the host status register and compares it
 * with the provided bit mask to check if the HBA completed
 * the restart. This function will wait in a loop for the
 * HBA to complete restart. If the HBA does not restart within
 * 15 iterations, the function will reset the HBA again. The
 * function returns 1 when the HBA fails to restart, otherwise it returns
 * zero.
 **/
static int
lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int i = 0;
	int retval = 0;

	/* Read the HBA Host Status Register */
	status = readl(phba->HSregaddr);

	/*
	 * Check status register every 100ms for 5 retries, then every
	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
	 * every 2.5 sec for 4.
	 * Break out of the loop if errors occurred during init.
	 */
	while (((status & mask) != mask) &&
	       !(status & HS_FFERM) &&
	       i++ < 20) {
		/* sleep lengths follow the schedule in the comment above */
		if (i <= 5)
			msleep(100);
		else if (i <= 10)
			msleep(500);
		else
			msleep(2500);

		if (i == 15) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		status = readl(phba->HSregaddr);
	}

	/* Check to see if any errors occurred during init */
	if ((status & HS_FFERM) || (i >= 20)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2751 Adapter failed to restart, "
				"status reg x%x, FW Data: A8 x%x AC x%x\n",
				status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	}

	return retval;
}
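/*
 * Illustrative sketch (editor's addition): the escalating poll schedule
 * described in the comment of lpfc_sli_brdready_s3 above (fast polls
 * first, progressively longer sleeps, one mid-course reset). The sleep
 * lengths are taken from that comment; the msleep stand-in and callback
 * names are assumptions for a userspace model. Fenced out of compilation.
 */
#if 0
#include <unistd.h>

static void demo_msleep(unsigned int ms) { usleep(ms * 1000); }

static int demo_wait_ready(int (*ready)(void), void (*restart)(void))
{
	int i = 0;

	while (!ready() && i++ < 20) {
		if (i <= 5)
			demo_msleep(100);	/* every 100ms for 5 */
		else if (i <= 10)
			demo_msleep(500);	/* then every 500ms for 5 */
		else
			demo_msleep(2500);	/* then every 2.5s */
		if (i == 15)
			restart();	/* one mid-course board restart */
	}
	return (i >= 20) ? -1 : 0;	/* -1: gave up, caller flags error */
}
#endif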
/**
 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function checks the host status register to check if the HBA is
 * ready. This function will wait in a loop for the HBA to be ready.
 * If the HBA is not ready, the function will reset the HBA PCI
 * function again. The function returns 1 when the HBA fails to be ready,
 * otherwise it returns zero.
 **/
static int
lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int retval = 0;

	/* Read the HBA Host Status Register */
	status = lpfc_sli4_post_status_check(phba);

	if (status) {
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		status = lpfc_sli4_post_status_check(phba);
	}

	/* Check to see if any errors occurred during init */
	if (status) {
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	} else
		phba->sli4_hba.intr_enable = 0;

	return retval;
}

/**
 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
 * from the API jump table function pointer from the lpfc_hba struct.
 **/
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
	return phba->lpfc_sli_brdready(phba, mask);
}
#define BARRIER_TEST_PATTERN (0xdeadbeef)

/**
 * lpfc_reset_barrier - Make HBA ready for HBA reset
 * @phba: Pointer to HBA context object.
 *
 * This function is called before resetting an HBA. This
 * function requests the HBA to quiesce DMAs before a reset.
 **/
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
	uint32_t __iomem *resp_buf;
	uint32_t __iomem *mbox_buf;
	volatile uint32_t mbox;
	uint32_t hc_copy;
	int i;
	uint8_t hdrtype;

	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
	if (hdrtype != 0x80 ||
	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
		return;

	/*
	 * Tell the other part of the chip to suspend temporarily all
	 * its DMA activity.
	 */
	resp_buf = phba->MBslimaddr;

	/* Disable the error attention */
	hc_copy = readl(phba->HCregaddr);
	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;

	if (readl(phba->HAregaddr) & HA_ERATT) {
		/* Clear Chip error bit */
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

	mbox = 0;
	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
	mbox_buf = phba->MBslimaddr;
	writel(mbox, mbox_buf);

	for (i = 0;
	     readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
		mdelay(1);

	if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
		    phba->pport->stopped)
			goto restore_hc;
		else
			goto clear_errat;
	}

	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
	for (i = 0; readl(resp_buf) != mbox && i < 500; i++)
		mdelay(1);

clear_errat:

	while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
		mdelay(1);

	if (readl(phba->HAregaddr) & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

restore_hc:
	phba->link_flag &= ~LS_IGNORE_ERATT;
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
}
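/*
 * Illustrative sketch (editor's addition): the write-pattern-then-poll
 * barrier idea used by lpfc_reset_barrier above. The host writes a known
 * pattern into a response slot, signals the chip, and polls until the
 * chip overwrites the slot with the bitwise complement, bounding the
 * wait with a retry count. Runnable userspace model where the "device"
 * answer is simulated by a direct store; fenced out of compilation.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define TEST_PATTERN 0xdeadbeefu

static volatile uint32_t resp_slot;

static int demo_barrier(void)
{
	int i;

	resp_slot = TEST_PATTERN;	/* post the pattern */
	/* a real device would see the doorbell and answer; simulate it */
	resp_slot = ~TEST_PATTERN;

	for (i = 0; resp_slot != ~TEST_PATTERN && i < 50; i++)
		;			/* would mdelay(1) between polls */

	return resp_slot == ~TEST_PATTERN ? 0 : -1;
}

int main(void)
{
	printf("barrier %s\n", demo_barrier() ? "timed out" : "acknowledged");
	return 0;
}
#endif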
/**
 * lpfc_sli_brdkill - Issue a kill_board mailbox command
 * @phba: Pointer to HBA context object.
 *
 * This function issues a kill_board mailbox command and waits for
 * the error attention interrupt. This function is called for stopping
 * the firmware processing. The caller is not required to hold any
 * locks. This function calls lpfc_hba_down_post function to free
 * any pending commands after the kill. The function will return 1 when it
 * fails to kill the board, else it will return 0.
 **/
int
lpfc_sli_brdkill(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *pmb;
	uint32_t status;
	uint32_t ha_copy;
	int retval;
	int i = 0;

	psli = &phba->sli;

	/* Kill HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0329 Kill HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return 1;

	/* Disable the error attention */
	spin_lock_irq(&phba->hbalock);
	status = readl(phba->HCregaddr);
	status &= ~HC_ERINT_ENA;
	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_kill_board(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if (retval != MBX_SUCCESS) {
		if (retval != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2752 KILL_BOARD command failed retval %d\n",
				retval);
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_IGNORE_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return 1;
	}

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	mempool_free(pmb, phba->mbox_mem_pool);

	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
	 * attention every 100ms for 3 seconds. If we don't get ERATT after
	 * 3 seconds we still set HBA_ERROR state because the status of the
	 * board is now undefined.
	 */
	ha_copy = readl(phba->HAregaddr);

	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
		mdelay(100);
		ha_copy = readl(phba->HAregaddr);
	}

	del_timer_sync(&psli->mbox_tmo);
	if (ha_copy & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	psli->mbox_active = NULL;
	phba->link_flag &= ~LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_hba_down_post(phba);
	phba->link_state = LPFC_HBA_ERROR;

	return ha_copy & HA_ERATT ? 0 : 1;
}
/**
 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function resets the HBA by writing HC_INITFF to the control
 * register. After the HBA resets, this function resets all the iocb ring
 * indices. This function disables PCI layer parity checking during
 * the reset.
 * This function returns 0 always.
 * The caller is not required to hold any locks.
 **/
int
lpfc_sli_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	uint16_t cfg_value;
	int i;

	psli = &phba->sli;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0325 Reset HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);

	/* Now toggle INITFF bit in the Host Control Register */
	writel(HC_INITFF, phba->HCregaddr);
	mdelay(1);
	readl(phba->HCregaddr); /* flush */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	/* Initialize relevant SLI info */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->flag = 0;
		pring->rspidx = 0;
		pring->next_cmdidx = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		pring->missbufcnt = 0;
	}

	phba->link_state = LPFC_WARM_START;
	return 0;
}
/**
 * lpfc_sli4_brdreset - Reset a sli-4 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function resets a SLI4 HBA. This function disables PCI layer parity
 * checking while resetting the device. The caller is not required to hold
 * any locks.
 *
 * This function returns 0 always.
 **/
int
lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint16_t cfg_value;
	uint8_t qindx;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0295 Reset HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~(LPFC_PROCESS_LA);
	phba->fcf.fcf_flag = 0;
	/* Clean up the child queue list for the CQs */
	list_del_init(&phba->sli4_hba.mbx_wq->list);
	list_del_init(&phba->sli4_hba.els_wq->list);
	list_del_init(&phba->sli4_hba.hdr_rq->list);
	list_del_init(&phba->sli4_hba.dat_rq->list);
	list_del_init(&phba->sli4_hba.mbx_cq->list);
	list_del_init(&phba->sli4_hba.els_cq->list);
	for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
		list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
	for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
		list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
	spin_unlock_irq(&phba->hbalock);

	/* Now physically reset the device */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0389 Performing PCI function reset!\n");
	/* Perform FCoE PCI function reset */
	lpfc_pci_function_reset(phba);

	return 0;
}
/**
 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to
 * restart the HBA. The caller is not required to hold any lock.
 * This function writes MBX_RESTART mailbox command to the SLIM and
 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
 * function to free any pending commands. The function enables
 * POST only during the first initialization. The function returns zero.
 * The function does not guarantee completion of MBX_RESTART mailbox
 * command before the return of this function.
 **/
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	volatile uint32_t word0;
	void __iomem *to_slim;
	uint32_t hba_aer_enabled;

	spin_lock_irq(&phba->hbalock);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	psli = &phba->sli;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0337 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	word0 = 0;
	mb = (MAILBOX_t *) &word0;
	mb->mbxCommand = MBX_RESTART;
	mb->mbxHc = 1;

	lpfc_reset_barrier(phba);

	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	/* Only skip post after fc_ffinit is completed */
	if (phba->pport->port_state)
		word0 = 1;	/* This is really setting up word1 */
	else
		word0 = 0;	/* This is really setting up word1 */
	to_slim = phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	lpfc_sli_brdreset(phba);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = get_seconds();

	/* Give the INITFF and Post time to settle. */
	mdelay(100);

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);

	return 0;
}
/**
 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to restart
 * a SLI4 HBA. The caller is not required to hold any lock.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 **/
static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t hba_aer_enabled;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0296 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	lpfc_sli4_brdreset(phba);

	spin_lock_irq(&phba->hbalock);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = get_seconds();

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);

	return 0;
}

/**
 * lpfc_sli_brdrestart - Wrapper func for restarting hba
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
 * API jump table function pointer from the lpfc_hba struct.
 **/
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
	return phba->lpfc_sli_brdrestart(phba);
}
/**
 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
 * @phba: Pointer to HBA context object.
 *
 * This function is called after an HBA restart to wait for successful
 * restart of the HBA. Successful restart of the HBA is indicated by the
 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
 * iterations, the function will restart the HBA again. The function returns
 * zero if the HBA successfully restarted, else it returns a negative
 * error code.
 **/
int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	status = readl(phba->HSregaddr);

	/* Check status register to see what current state is */
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 10ms for 10 retries, then every 100ms for 90
		 * retries, then every 1 sec for 50 retries, for a total of
		 * ~60 seconds before resetting the board again and checking
		 * every 1 sec for 50 retries. The up to 60 seconds before the
		 * board is ready is required for the Falcon FIPS zeroization
		 * to complete; any reset of the board in between restarts
		 * the zeroization and further delays the board becoming
		 * ready.
		 */
		if (i++ >= 200) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0436 Adapter failed to init, "
					"timeout, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0437 Adapter failed to init, "
					"chipset, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		if (i <= 10)
			msleep(10);
		else if (i <= 100)
			msleep(100);
		else
			msleep(1000);

		if (i == 150) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		status = readl(phba->HSregaddr);
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0438 Adapter failed to init, chipset, "
				"status reg x%x, "
				"FW Data: A8 x%x AC x%x\n", status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}
/**
 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
 *
 * This function calculates and returns the number of HBQs required to be
 * configured.
 **/
int
lpfc_sli_hbq_count(void)
{
	return ARRAY_SIZE(lpfc_hbq_defs);
}

/**
 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
 *
 * This function adds the number of hbq entries in every HBQ to get
 * the total number of hbq entries required for the HBA and returns
 * the total count.
 **/
static int
lpfc_sli_hbq_entry_count(void)
{
	int hbq_count = lpfc_sli_hbq_count();
	int count = 0;
	int i;

	for (i = 0; i < hbq_count; ++i)
		count += lpfc_hbq_defs[i]->entry_count;
	return count;
}

/**
 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
 *
 * This function calculates the amount of memory required for all hbq entries
 * to be configured and returns the total memory required.
 **/
int
lpfc_sli_hbq_size(void)
{
	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
}
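/*
 * Illustrative sketch (editor's addition): the two-step sizing arithmetic
 * of lpfc_sli_hbq_entry_count/lpfc_sli_hbq_size above, with hypothetical
 * entry counts and an assumed 16-byte entry. Total memory is simply the
 * sum of per-HBQ entry counts times the fixed entry size. Runnable
 * userspace model; fenced out of compilation.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

struct demo_hbq_def { int entry_count; };
struct demo_hbq_entry { unsigned int w[4]; };	/* assumed 16-byte entry */

static struct demo_hbq_def defs[] = { { 256 }, { 128 } };	/* hypothetical */

int main(void)
{
	size_t i, count = 0;

	for (i = 0; i < sizeof(defs) / sizeof(defs[0]); i++)
		count += defs[i].entry_count;
	printf("%zu entries, %zu bytes\n",
	       count, count * sizeof(struct demo_hbq_entry));
	return 0;
}
#endif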
/**
 * lpfc_sli_hbq_setup - configure and initialize HBQs
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * all the HBQs and post buffers to the HBQ. The caller is not
 * required to hold any locks. This function will return zero if successful,
 * else it will return a negative error code.
 **/
static int
lpfc_sli_hbq_setup(struct lpfc_hba *phba)
{
	int hbq_count = lpfc_sli_hbq_count();
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	uint32_t hbqno;
	uint32_t hbq_entry_index;

	/* Get a Mailbox buffer to setup mailbox
	 * commands for HBA initialization
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb)
		return -ENOMEM;

	pmbox = &pmb->u.mb;

	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
	phba->link_state = LPFC_INIT_MBX_CMDS;
	phba->hbq_in_use = 1;

	hbq_entry_index = 0;
	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
		phba->hbqs[hbqno].next_hbqPutIdx = 0;
		phba->hbqs[hbqno].hbqPutIdx = 0;
		phba->hbqs[hbqno].local_hbqGetIdx = 0;
		phba->hbqs[hbqno].entry_count =
			lpfc_hbq_defs[hbqno]->entry_count;
		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
				hbq_entry_index, pmb);
		hbq_entry_index += phba->hbqs[hbqno].entry_count;

		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
			   mbxStatus <status>, ring <num> */
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1805 Adapter failed to init. "
					"Data: x%x x%x x%x\n",
					pmbox->mbxCommand,
					pmbox->mbxStatus, hbqno);

			phba->link_state = LPFC_HBA_ERROR;
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ENXIO;
		}
	}
	phba->hbq_count = hbq_count;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* Initially populate or replenish the HBQs */
	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
	return 0;
}
/**
 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * all the HBQs and post buffers to the HBQ. The caller is not
 * required to hold any locks. This function will return zero if successful,
 * else it will return a negative error code.
 **/
static int
lpfc_sli4_rb_setup(struct lpfc_hba *phba)
{
	phba->hbq_in_use = 1;
	phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
	phba->hbq_count = 1;
	/* Initially populate or replenish the HBQs */
	lpfc_sli_hbqbuf_init_hbqs(phba, 0);
	return 0;
}
/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: sli mode - 2/3
 *
 * This function is called by the sli initialization code path
 * to issue config_port mailbox command. This function restarts the
 * HBA firmware and issues a config_port mailbox command to configure
 * the SLI interface in the sli mode specified by sli_mode
 * variable. The caller is not required to hold any locks.
 * The function returns 0 if successful, else returns negative error
 * code.
 **/
int
lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	phba->sli_rev = sli_mode;
	while (resetcount < 2 && !done) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization. A
		 * value of 0 means the call was successful. Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->link_state = LPFC_LINK_UNKNOWN;
			continue;
		} else if (rc)
			break;
		phba->link_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
					LPFC_SLI3_HBQ_ENABLED |
					LPFC_SLI3_CRP_ENABLED |
					LPFC_SLI3_BG_ENABLED |
					LPFC_SLI3_DSS_ENABLED);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);
			rc = -ENXIO;
		} else {
			/* Allow asynchronous mailbox command to go through */
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
			spin_unlock_irq(&phba->hbalock);
			done = 1;
		}
	}
	if (!done) {
		rc = -EINVAL;
		goto do_prep_failed;
	}
	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
		if (!pmb->u.mb.un.varCfgPort.cMA) {
			rc = -ENXIO;
			goto do_prep_failed;
		}
		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
				phba->max_vpi : phba->max_vports;
		} else
			phba->max_vpi = 0;
		phba->fips_level = 0;
		phba->fips_spec_rev = 0;
		if (pmb->u.mb.un.varCfgPort.gdss) {
			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
			phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
			phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2850 Security Crypto Active. FIPS x%d "
					"(Spec Rev: x%d)",
					phba->fips_level, phba->fips_spec_rev);
		}
		if (pmb->u.mb.un.varCfgPort.sec_err) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2856 Config Port Security Crypto "
					"Error: x%x ",
					pmb->u.mb.un.varCfgPort.sec_err);
		}
		if (pmb->u.mb.un.varCfgPort.gerbm)
			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
		if (pmb->u.mb.un.varCfgPort.gcrp)
			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;

		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
		phba->port_gp = phba->mbox->us.s3_pgp.port;

		if (phba->cfg_enable_bg) {
			if (pmb->u.mb.un.varCfgPort.gbg)
				phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
			else
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"0443 Adapter did not grant "
						"BlockGuard\n");
		}
	} else {
		phba->hbq_get = NULL;
		phba->port_gp = phba->mbox->us.s2.port;
		phba->max_vpi = 0;
	}
do_prep_failed:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
/**
 * lpfc_sli_hba_setup - SLI initialization function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI initialization function. This function
 * is called by the HBA initialization code, HBA reset code and HBA
 * error attention handler code. Caller is not required to hold any
 * locks. This function issues config_port mailbox command to configure
 * the SLI, setup iocb rings and HBQ rings. In the end the function
 * calls the config_port_post function to issue init_link mailbox
 * command and to start the discovery. The function will return zero
 * if successful, else it will return negative error code.
 **/
int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
	uint32_t rc;
	int mode = 3;

	switch (lpfc_sli_mode) {
	case 2:
		if (phba->cfg_enable_npiv) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1824 NPIV enabled: Override lpfc_sli_mode "
				"parameter (%d) to auto (0).\n",
				lpfc_sli_mode);
			break;
		}
		mode = 2;
		break;
	case 0:
	case 3:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1819 Unrecognized lpfc_sli_mode "
				"parameter: %d.\n", lpfc_sli_mode);
		break;
	}

	rc = lpfc_sli_config_port(phba, mode);

	if (rc && lpfc_sli_mode == 3)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1820 Unable to select SLI-3. "
				"Not supported by adapter.\n");
	if (rc && mode != 2)
		rc = lpfc_sli_config_port(phba, 2);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2709 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2708 This device does not support "
					"Advanced Error Reporting (AER)\n");
			phba->cfg_aer_support = 0;
		}
	}

	if (phba->sli_rev == 3) {
		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
	} else {
		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
		phba->sli3_options = 0;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
			phba->sli_rev, phba->max_vpi);
	rc = lpfc_sli_ring_map(phba);

	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Init HBQs */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		rc = lpfc_sli_hbq_setup(phba);
		if (rc)
			goto lpfc_sli_hba_setup_error;
	}
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_PROCESS_LA;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	return rc;

lpfc_sli_hba_setup_error:
	phba->link_state = LPFC_HBA_ERROR;
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0445 Firmware initialization failed\n");
	return rc;
}
/**
 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
 * @phba: Pointer to HBA context object.
 * @mboxq: mailbox pointer.
 * This function issues a dump mailbox command to read config region
 * 23 and parse the records in the region and populate driver
 * config parameters.
 **/
static int
lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
		LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *mp;
	struct lpfc_mqe *mqe;
	uint32_t data_length;
	int rc;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	mqe = &mboxq->u.mqe;
	if (lpfc_dump_fcoe_param(phba, mboxq))
		return -ENOMEM;

	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):2571 Mailbox cmd x%x Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_command, mqe),
			bf_get(lpfc_mqe_status, mqe),
			mqe->un.mb_words[0], mqe->un.mb_words[1],
			mqe->un.mb_words[2], mqe->un.mb_words[3],
			mqe->un.mb_words[4], mqe->un.mb_words[5],
			mqe->un.mb_words[6], mqe->un.mb_words[7],
			mqe->un.mb_words[8], mqe->un.mb_words[9],
			mqe->un.mb_words[10], mqe->un.mb_words[11],
			mqe->un.mb_words[12], mqe->un.mb_words[13],
			mqe->un.mb_words[14], mqe->un.mb_words[15],
			mqe->un.mb_words[16], mqe->un.mb_words[50],
			mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);

	if (rc) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}
	data_length = mqe->un.mb_words[5];
	if (data_length > DMP_RGN23_SIZE) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	return 0;
}
/**
 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
 * @vpd: pointer to the memory to hold resulting port vpd data.
 * @vpd_size: On input, the number of bytes allocated to @vpd.
 *	      On output, the number of data bytes in @vpd.
 *
 * This routine executes a READ_REV SLI4 mailbox command.  In
 * addition, this routine gets the port vpd data.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		    uint8_t *vpd, uint32_t *vpd_size)
{
	int rc = 0;
	uint32_t dma_size;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_mqe *mqe;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * Get a DMA buffer for the vpd data resulting from the READ_REV
	 * mailbox command.
	 */
	dma_size = *vpd_size;
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  dma_size,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}
	memset(dmabuf->virt, 0, dma_size);

	/*
	 * The SLI4 implementation of READ_REV conflicts at word1,
	 * bits 31:16 and SLI4 adds vpd functionality not present
	 * in SLI3.  This code corrects the conflicts.
	 */
	lpfc_read_rev(phba, mboxq);
	mqe = &mboxq->u.mqe;
	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
	mqe->un.read_rev.word1 &= 0x0000FFFF;
	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		dma_free_coherent(&phba->pcidev->dev, dma_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
		return -EIO;
	}

	/*
	 * The available vpd length cannot be bigger than the
	 * DMA buffer passed to the port.  Catch the less than
	 * case and update the caller's size.
	 */
	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
		*vpd_size = mqe->un.read_rev.avail_vpd_len;

	memcpy(vpd, dmabuf->virt, *vpd_size);

	dma_free_coherent(&phba->pcidev->dev, dma_size,
			  dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return 0;
}
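
/*
 * Caller sketch (hypothetical, illustration only): @vpd_size is an
 * in/out parameter, so the caller passes in the allocated size and
 * reads back how many bytes the port actually returned.  This mirrors
 * how lpfc_sli4_hba_setup() below drives the routine.
 */
static inline int
lpfc_demo_collect_vpd(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	uint32_t vpd_size = SLI4_PAGE_SIZE;	/* bytes allocated for vpd */
	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
	int rc;

	if (!vpd)
		return -ENOMEM;
	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	/* on success, vpd_size now holds the byte count the port filled */
	kfree(vpd);
	return rc;
}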
/**
 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to explicitly arm the SLI4 device's completion and
 * event queues.
 **/
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
	uint8_t fcp_eqidx;

	lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
	lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
		lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
				     LPFC_QUEUE_REARM);
	lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
		lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
				     LPFC_QUEUE_REARM);
}
/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI4 device initialization PCI function. This
 * function is called by the HBA initialization code, HBA reset code and
 * HBA error attention handler code. Caller is not required to hold any
 * locks.
 **/
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
	int rc;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	uint8_t *vpd;
	uint32_t vpd_size;
	uint32_t ftr_rsp = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_dmabuf *mp;

	/* Perform a PCI function reset to start from clean */
	rc = lpfc_pci_function_reset(phba);
	if (unlikely(rc))
		return -ENODEV;

	/* Check the HBA Host Status Register for readiness */
	rc = lpfc_sli4_post_status_check(phba);
	if (unlikely(rc))
		return -ENODEV;
	else {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
	}

	/*
	 * Allocate a single mailbox container for initializing the
	 * port.
	 */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
					       GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/*
	 * Continue initialization with default values even if driver failed
	 * to read FCoE param config regions
	 */
	if (lpfc_sli4_read_fcoe_params(phba, mboxq))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2570 Failed to read FCoE parameters\n");

	/* Issue READ_REV to collect vpd and FW information. */
	vpd_size = SLI4_PAGE_SIZE;
	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd) {
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	if (unlikely(rc)) {
		kfree(vpd);
		goto out_free_mbox;
	}
	mqe = &mboxq->u.mqe;
	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
		phba->hba_flag |= HBA_FCOE_MODE;
	else
		phba->hba_flag &= ~HBA_FCOE_MODE;

	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
		LPFC_DCBX_CEE_MODE)
		phba->hba_flag |= HBA_FIP_SUPPORT;
	else
		phba->hba_flag &= ~HBA_FIP_SUPPORT;

	if (phba->sli_rev != LPFC_SLI_REV4 ||
	    !(phba->hba_flag & HBA_FCOE_MODE)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0376 READ_REV Error. SLI Level %d "
			"FCoE enabled %d\n",
			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
		rc = -EIO;
		kfree(vpd);
		goto out_free_mbox;
	}
	/*
	 * Evaluate the read rev and vpd data. Populate the driver
	 * state with the results. If this routine fails, the failure
	 * is not fatal as the driver will use generic values.
	 */
	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
	if (unlikely(!rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0377 Error %d parsing vpd. "
				"Using defaults.\n", rc);
		rc = 0;
	}
	kfree(vpd);

	/* Save information as VPD data */
	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
					&mqe->un.read_rev);
	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
				       &mqe->un.read_rev);
	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
					    &mqe->un.read_rev);
	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
					   &mqe->un.read_rev);
	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0380 READ_REV Status x%x "
			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_status, mqe),
			phba->vpd.rev.opFwName,
			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);

	/*
	 * Discover the port's supported feature set and match it against the
	 * host's requests.
	 */
	lpfc_request_features(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/*
	 * The port must support FCP initiator mode as this is the
	 * only mode running in the host.
	 */
	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0378 No support for fcpi mode.\n");
		ftr_rsp++;
	}

	/*
	 * If the port cannot support the host's requested features
	 * then turn off the global config parameters to disable the
	 * feature in the driver.  This is not a fatal error.
	 */
	if ((phba->cfg_enable_bg) &&
	    !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
		ftr_rsp++;

	if (phba->max_vpi && phba->cfg_enable_npiv &&
	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
		ftr_rsp++;

	if (ftr_rsp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0379 Feature Mismatch Data: x%08x %08x "
				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
				phba->cfg_enable_npiv, phba->max_vpi);
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
			phba->cfg_enable_bg = 0;
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
			phba->cfg_enable_npiv = 0;
	}

	/* These SLI3 features are assumed in SLI4 */
	spin_lock_irq(&phba->hbalock);
	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
	spin_unlock_irq(&phba->hbalock);

	/* Read the port's service parameters. */
	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
	if (rc) {
		phba->link_state = LPFC_HBA_ERROR;
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	if (rc == MBX_SUCCESS) {
		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
		rc = 0;
	}

	/*
	 * This memory was allocated by the lpfc_read_sparam routine. Release
	 * it to the mbuf pool.
	 */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mboxq->context1 = NULL;
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0382 READ_SPARAM command failed "
				"status %d, mbxStatus x%x\n",
				rc, bf_get(lpfc_mqe_status, mqe));
		phba->link_state = LPFC_HBA_ERROR;
		rc = -EIO;
		goto out_free_mbox;
	}

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof(struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);

	/* Register SGL pool to the device using non-embedded mailbox command */
	rc = lpfc_sli4_post_sgl_list(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0582 Error %d during sgl post operation\n",
				rc);
		rc = -ENODEV;
		goto out_free_mbox;
	}

	/* Register SCSI SGL pool to the device */
	rc = lpfc_sli4_repost_scsi_sgl_list(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0383 Error %d during scsi sgl post "
				"operation\n", rc);
		/* Some Scsi buffers were moved to the abort scsi list */
		/* A pci function reset will repost them */
		rc = -ENODEV;
		goto out_free_mbox;
	}

	/* Post the rpi header region to the device. */
	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0393 Error %d during rpi post operation\n",
				rc);
		rc = -ENODEV;
		goto out_free_mbox;
	}

	/* Set up all the queues to the device */
	rc = lpfc_sli4_queue_setup(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0381 Error %d during queue setup.\n ", rc);
		goto out_stop_timers;
	}

	/* Arm the CQs and then EQs on device */
	lpfc_sli4_arm_cqeq_intr(phba);

	/* Indicate device interrupt mode */
	phba->sli4_hba.intr_enable = 1;

	/* Allow asynchronous mailbox command to go through */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Post receive buffers to the device */
	lpfc_sli4_rb_setup(phba);

	/* Reset HBA FCF states after HBA reset */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/* Start the ELS watchdog timer */
	mod_timer(&vport->els_tmofunc,
		  jiffies + HZ * (phba->fc_ratov * 2));

	/* Start heart beat timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;

	/* Start error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2829 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2830 This device does not support "
					"Advanced Error Reporting (AER)\n");
			phba->cfg_aer_support = 0;
		}
	}

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		/*
		 * The FC Port needs to register FCFI (index 0)
		 */
		lpfc_reg_fcfi(phba, mboxq);
		mboxq->vport = phba->pport;
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc == MBX_SUCCESS)
			rc = 0;
		else
			goto out_unset_queue;
	}
	/*
	 * The port is ready, set the host's link state to LINK_DOWN
	 * in preparation for link interrupts.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_DOWN;
	spin_unlock_irq(&phba->hbalock);
	rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
	if (!rc)
		return 0;
out_unset_queue:
	/* Unset all the queues set up in this routine when error out */
	if (rc)
		lpfc_sli4_queue_unset(phba);
out_stop_timers:
	if (rc)
		lpfc_stop_hba_timers(phba);
out_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
/**
 * lpfc_mbox_timeout - Timeout call back function for mbox timer
 * @ptr: context object - pointer to hba structure.
 *
 * This is the callback function for mailbox timer. The mailbox
 * timer is armed when a new mailbox command is issued and the timer
 * is deleted when the mailbox completes. The function is called by
 * the kernel timer code when a mailbox does not complete within
 * expected time. This function wakes up the worker thread to
 * process the mailbox timeout and returns. All the processing is
 * done by the worker thread function lpfc_mbox_timeout_handler.
 **/
void
lpfc_mbox_timeout(unsigned long ptr)
{
	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
	unsigned long iflag;
	uint32_t tmo_posted;

	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
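
/*
 * Illustrative sketch (hypothetical helper): the pairing used with the
 * callback above.  Issue paths arm mbox_tmo with mod_timer() before
 * posting a command and the completion path deletes it; only when
 * neither happens does lpfc_mbox_timeout() fire and defer the heavy
 * recovery work to lpfc_mbox_timeout_handler() on the worker thread.
 */
static inline void
lpfc_demo_arm_mbox_timer(struct lpfc_hba *phba, uint8_t mbx_cmd)
{
	/* per-command timeout in seconds, converted to jiffies */
	mod_timer(&phba->sli.mbox_tmo,
		  jiffies + HZ * lpfc_mbox_tmo_val(phba, mbx_cmd));
}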
/**
 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
 * @phba: Pointer to HBA context object.
 *
 * This function is called from worker thread when a mailbox command times out.
 * The caller is not required to hold any locks. This function will reset the
 * HBA and recover all the pending commands.
 **/
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = &pmbox->u.mb;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	/* Check the pmbox pointer first.  There is a race condition
	 * between the mbox timeout handler getting executed in the
	 * worklist and the mailbox actually completing. When this
	 * race condition occurs, the mbox_active will be NULL.
	 */
	spin_lock_irq(&phba->hbalock);
	if (pmbox == NULL) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_MBOX | LOG_SLI,
				"0353 Active Mailbox cleared - mailbox timeout "
				"exiting\n");
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
			mb->mbxCommand,
			phba->pport->port_state,
			psli->sli_flag,
			phba->sli.mbox_active);
	spin_unlock_irq(&phba->hbalock);

	/* Setting state unknown so lpfc_sli_abort_iocb_ring
	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
	 * it to fail all outstanding SCSI IO.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0345 Resetting board due to mailbox timeout\n");

	/* Reset the HBA device */
	lpfc_reset_hba(phba);
}
/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code
 * to submit a mailbox command to firmware with SLI-3 interface spec. This
 * function gets the hbalock to protect the data structures.
 * The mailbox command can be submitted in polling mode, in which case
 * this function will wait in a polling loop for the completion of the
 * mailbox.
 * If the mailbox is submitted in no_wait mode (not polling) the
 * function will submit the command and returns immediately without waiting
 * for the mailbox completion. The no_wait is supported only when HBA
 * is in SLI2/SLI3 mode - interrupts are enabled.
 * The SLI interface allows only one mailbox pending at a time. If the
 * mailbox is issued in polling mode and there is already a mailbox
 * pending, then the function will return an error. If the mailbox is issued
 * in NO_WAIT mode and there is a mailbox pending already, the function
 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
 * The sli layer owns the mailbox object until the completion of mailbox
 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
 * return codes the caller owns the mailbox command after the return of
 * the function.
 **/
static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
		       uint32_t flag)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, evtctr;
	uint32_t ha_copy;
	int i;
	unsigned long timeout;
	unsigned long drvr_flag = 0;
	uint32_t word0, ldata;
	void __iomem *to_slim;
	int processing_queue = 0;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	if (!pmbox) {
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		/* processing mbox queue from intr_handler */
		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
		processing_queue = 1;
		pmbox = lpfc_mbox_get(phba);
		if (!pmbox) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
	}

	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. No vport\n",
					pmbox->u.mb.mbxCommand);
			dump_stack();
			goto out_not_finished;
		}
	}

	/* If the PCI channel is in offline state, do not post mbox. */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	psli = &phba->sli;

	mb = &pmbox->u.mb;
	status = MBX_SUCCESS;

	if (phba->link_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):0311 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}

	if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
	    !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2528 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already active
		 * is not allowed in SLI. Also, the driver must have established
		 * SLI2 mode to queue and process multiple mbox commands.
		 */

		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2529 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2530 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		/* Another mailbox command is still being processed, queue this
		 * command to be processed later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0308 Mbox cmd issue - BUSY Data: "
				"x%x x%x x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
				mb->mbxCommand, phba->pport->port_state,
				psli->sli_flag, flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
				(uint32_t)mb->mbxCommand,
				mb->un.varWords[0], mb->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Bsy:        cmd:x%x mb:x%x x%x",
				(uint32_t)mb->mbxCommand,
				mb->un.varWords[0], mb->un.varWords[1]);
		}

		return MBX_BUSY;
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
		    (mb->mbxCommand != MBX_KILL_BOARD)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2531 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}
		/* timeout active mbox command */
		mod_timer(&psli->mbox_tmo, (jiffies +
			       (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
			"x%x\n",
			pmbox->vport ? pmbox->vport->vpi : 0,
			mb->mbxCommand, phba->pport->port_state,
			psli->sli_flag, flag);

	if (mb->mbxCommand != MBX_HEARTBEAT) {
		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				(uint32_t)mb->mbxCommand,
				mb->un.varWords[0], mb->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send:       cmd:x%x mb:x%x x%x",
				(uint32_t)mb->mbxCommand,
				mb->un.varWords[0], mb->un.varWords[1]);
		}
	}

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mb->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
			*(((uint32_t *)mb) + pmbox->mbox_offset_word)
				= (uint8_t *)phba->mbox_ext
				  - (uint8_t *)phba->mbox;
		}

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->context2) {
			lpfc_sli_pcimem_bcopy(pmbox->context2,
					      (uint8_t *)phba->mbox_ext,
					      pmbox->in_ext_byte_len);
		}
		/* Copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
	} else {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
			*(((uint32_t *)mb) + pmbox->mbox_offset_word)
				= MAILBOX_HBA_EXT_OFFSET;

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->context2) {
			lpfc_memcpy_to_slim(phba->MBslimaddr +
					    MAILBOX_HBA_EXT_OFFSET,
					    pmbox->context2,
					    pmbox->in_ext_byte_len);
		}
		if (mb->mbxCommand == MBX_CONFIG_PORT) {
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
		}

		/* First copy mbox command data to HBA SLIM, skip past first
		   word */
		to_slim = phba->MBslimaddr + sizeof (uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
				    MAILBOX_CMD_SIZE - sizeof (uint32_t));

		/* Next copy over first word, with mbxOwner set */
		ldata = *((uint32_t *)mb);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mb->mbxCommand == MBX_CONFIG_PORT) {
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI_ACTIVE;
		}
	}

	wmb();

	switch (flag) {
	case MBX_NOWAIT:
		/* Set up reference to mailbox command */
		psli->mbox_active = pmbox;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
		/* Don't wait for it to finish, just return */
		break;

	case MBX_POLL:
		/* Set up null reference to mailbox command */
		psli->mbox_active = NULL;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* First read mbox status word */
			word0 = *((uint32_t *)phba->mbox);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			word0 = readl(phba->MBslimaddr);
		}

		/* Read the HBA Host Attention Register */
		ha_copy = readl(phba->HAregaddr);
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
							     mb->mbxCommand) *
					   1000) + jiffies;
		i = 0;
		/* Wait for command to complete */
		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
		       (!(ha_copy & HA_MBATT) &&
			(phba->link_state > LPFC_WARM_START))) {
			if (time_after(jiffies, timeout)) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}

			/* Check if we took a mbox interrupt while we were
			   polling */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			if (i++ > 10) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				msleep(1);
				spin_lock_irqsave(&phba->hbalock, drvr_flag);
			}

			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
				/* First copy command data */
				word0 = *((uint32_t *)phba->mbox);
				word0 = le32_to_cpu(word0);
				if (mb->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *) &slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
						    ~LPFC_SLI_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			ha_copy = readl(phba->HAregaddr);
		}

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->context2) {
				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
						      pmbox->context2,
						      pmbox->out_ext_byte_len);
			}
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
					      MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->context2) {
				lpfc_memcpy_from_slim(pmbox->context2,
					phba->MBslimaddr +
					MAILBOX_HBA_EXT_OFFSET,
					pmbox->out_ext_byte_len);
			}
		}

		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mb->mbxStatus;
	}

	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return status;

out_not_finished:
	if (processing_queue) {
		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
		lpfc_mbox_cmpl_put(phba, pmbox);
	}
	return MBX_NOT_FINISHED;
}
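
/*
 * Caller sketch (hypothetical, illustration only) of the ownership
 * rule documented above: on MBX_BUSY or MBX_SUCCESS the SLI layer owns
 * the mailbox object until completion, so the caller must not free it;
 * for any other return code the caller still owns the mailbox.
 */
static inline void
lpfc_demo_issue_polled(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	int rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

	if (rc != MBX_SUCCESS && rc != MBX_BUSY)
		mempool_free(pmb, phba->mbox_mem_pool);	/* caller owns it */
	/* else the SLI layer releases it when the command completes */
}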
/**
 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
 * @phba: Pointer to HBA context object.
 *
 * The function blocks the posting of SLI4 asynchronous mailbox commands from
 * the driver internal pending mailbox queue. It will then try to wait out the
 * possible outstanding mailbox command before return.
 *
 * Returns:
 *	0 - the outstanding mailbox command completed; otherwise, the wait for
 *	the outstanding mailbox command timed out.
 **/
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint8_t actcmd = MBX_HEARTBEAT;
	int rc = 0;
	unsigned long timeout;

	/* Mark the asynchronous mailbox command posting as blocked */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	if (phba->sli.mbox_active)
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
	spin_unlock_irq(&phba->hbalock);
	/* Determine how long we might wait for the active mailbox
	 * command to be gracefully completed by firmware.
	 */
	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
				   jiffies;
	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timeout, mark the outstanding cmd not complete */
			rc = 1;
			break;
		}
	}

	/* Can not cleanly block async mailbox command, fails it */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}
/**
 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
 * @phba: Pointer to HBA context object.
 *
 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
 * commands from the driver internal pending mailbox queue. It makes sure
 * that there is no outstanding mailbox command before resuming posting
 * asynchronous mailbox commands. If, for any reason, there is outstanding
 * mailbox command, it will try to wait it out before resuming asynchronous
 * mailbox command posting.
 **/
static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Asynchronous mailbox posting is not blocked, do nothing */
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Outstanding synchronous mailbox command is guaranteed to be done,
	 * successful or timeout, after timing-out the outstanding mailbox
	 * command shall always be removed, so just unblock posting async
	 * mailbox command and resume
	 */
	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* wake up worker thread to post asynchronous mailbox command */
	lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function posts a mailbox to the port.  The mailbox is expected
 * to be completely filled in and ready for the port to operate on it.
 * This routine executes a synchronous completion operation on the
 * mailbox by polling for its completion.
 *
 * The caller must not be holding any locks when calling this routine.
 *
 * Returns:
 *	MBX_SUCCESS - mailbox posted successfully
 *	Any of the MBX error values.
 **/
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t db_ready;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	unsigned long timeout;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;
	struct lpfc_register bmbx_reg;

	/*
	 * Only one mailbox can be active to the bootstrap mailbox region
	 * at a time and there is no queueing provided.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2532 Mailbox command x%x (x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The server grabs the token and owns it until release */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/*
	 * Initialize the bootstrap memory region to avoid stale data areas
	 * in the mailbox post.  Then copy the caller's mailbox contents to
	 * the bmbx mailbox region.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
			      sizeof(struct lpfc_mqe));

	/* Post the high mailbox dma address to the port and wait for ready. */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);

	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
				   * 1000) + jiffies;
	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
		if (!db_ready)
			msleep(2);

		if (time_after(jiffies, timeout)) {
			rc = MBXERR_ERROR;
			goto exit;
		}
	} while (!db_ready);

	/* Post the low mailbox dma address to the port. */
	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
				   * 1000) + jiffies;
	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
		if (!db_ready)
			msleep(2);

		if (time_after(jiffies, timeout)) {
			rc = MBXERR_ERROR;
			goto exit;
		}
	} while (!db_ready);

	/*
	 * Read the CQ to ensure the mailbox has completed.
	 * If so, update the mailbox status so that the upper layers
	 * can complete the request normally.
	 */
	lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
			      sizeof(struct lpfc_mqe));
	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
	lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
			      sizeof(struct lpfc_mcqe));
	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);

	/* Prefix the mailbox status with range x4000 to note SLI4 status. */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
		rc = MBXERR_ERROR;
	} else
		lpfc_sli4_swap_str(phba, mboxq);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
			" x%x x%x CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
			bf_get(lpfc_mqe_status, mb),
			mb->un.mb_words[0], mb->un.mb_words[1],
			mb->un.mb_words[2], mb->un.mb_words[3],
			mb->un.mb_words[4], mb->un.mb_words[5],
			mb->un.mb_words[6], mb->un.mb_words[7],
			mb->un.mb_words[8], mb->un.mb_words[9],
			mb->un.mb_words[10], mb->un.mb_words[11],
			mb->un.mb_words[12], mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);
exit:
	/* We are holding the token, no need for lock when releasing it */
	spin_lock_irqsave(&phba->hbalock, iflag);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
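
/*
 * Design note, expressed as a hypothetical helper: the routine above
 * repeats the same doorbell-ready poll twice, once per DMA address
 * half.  Factored out, the loop looks like this: bounded by the
 * per-command mailbox timeout and sleeping 2ms between register reads.
 */
static inline int
lpfc_demo_wait_bmbx_ready(struct lpfc_hba *phba, unsigned long timeout)
{
	struct lpfc_register bmbx_reg;
	uint32_t db_ready;

	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
		if (!db_ready)
			msleep(2);
		if (time_after(jiffies, timeout))
			return MBXERR_ERROR;	/* port never became ready */
	} while (!db_ready);
	return MBX_SUCCESS;
}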
/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code to submit
 * a mailbox command to firmware with SLI-4 interface spec.
 *
 * Return codes the caller owns the mailbox command after the return of the
 * function.
 **/
static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		       uint32_t flag)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long iflags;
	int rc;

	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2544 Mailbox command x%x (x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Detect polling mode and jump to a handler */
	if (!phba->sli4_hba.intr_enable) {
		if (flag == MBX_POLL)
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		else
			rc = -EIO;
		if (rc != MBX_SUCCESS)
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2541 Mailbox command x%x "
					"(x%x) cannot issue Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli4_mbox_opcode_get(phba, mboxq),
					psli->sli_flag, flag);
		return rc;
	} else if (flag == MBX_POLL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"(%d):2542 Try to issue mailbox command "
				"x%x (x%x) synchronously ahead of async "
				"mailbox command queue: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		/* Try to block the asynchronous mailbox posting */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			/* Successfully blocked, now issue sync mbox cmd */
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR,
						LOG_MBOX | LOG_SLI,
						"(%d):2597 Mailbox command "
						"x%x (x%x) cannot issue "
						"Data: x%x x%x\n",
						mboxq->vport ?
						mboxq->vport->vpi : 0,
						mboxq->u.mb.mbxCommand,
						lpfc_sli4_mbox_opcode_get(phba,
								mboxq),
						psli->sli_flag, flag);
			/* Unblock the async mailbox posting afterward */
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}

	/* Now, interrupt mode asynchronous mailbox command */
	rc = lpfc_mbox_cmd_check(phba, mboxq);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2543 Mailbox command x%x (x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Put the mailbox command to the driver internal FIFO */
	psli->slistat.mbox_busy++;
	spin_lock_irqsave(&phba->hbalock, iflags);
	lpfc_mbox_put(phba, mboxq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0354 Mbox cmd issue - Enqueue Data: "
			"x%x (x%x) x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
			lpfc_sli4_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state,
			psli->sli_flag, MBX_NOWAIT);
	/* Wake up worker thread to transport mailbox command from head */
	lpfc_worker_wake_up(phba);

	return MBX_BUSY;

out_not_finished:
	return MBX_NOT_FINISHED;
}
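
/*
 * Illustrative sketch (hypothetical helper): the block/post/unblock
 * pairing that lpfc_sli_issue_mbox_s4() above applies when a polled
 * command must cut ahead of the asynchronous mailbox queue.
 */
static inline int
lpfc_demo_sync_ahead_of_async(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;

	rc = lpfc_sli4_async_mbox_block(phba);	/* stop async posting */
	if (rc)
		return MBX_NOT_FINISHED;	/* could not drain, give up */
	rc = lpfc_sli4_post_sync_mbox(phba, mboxq);	/* polled issue */
	lpfc_sli4_async_mbox_unblock(phba);	/* resume async posting */
	return rc;
}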
/**
 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
 * @phba: Pointer to HBA context object.
 *
 * This function is called by worker thread to send a mailbox command to
 * SLI4 HBA firmware.
 **/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Check interrupt mode before post async mailbox command */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check for mailbox command service token */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command service token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox command waiting for post, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check device readiness for posting mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli4_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2533 Mailbox command x%x (x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	spin_lock_irqsave(&phba->hbalock, iflags);
	mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
	__lpfc_mbox_cmpl_put(phba, mboxq);
	/* Release the token */
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}
/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
 * the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes the caller owns the mailbox command after the return of the
 * function.
 **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
}
/**
 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the mbox interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1420 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}
/**
 * __lpfc_sli_ringtx_put - Add an iocb to the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held to add a command
 * iocb to the txq when SLI layer cannot submit the command iocb
 * to the ring.
 **/
void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb)
{
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
	pring->txq_cnt++;
}
/**
 * lpfc_sli_next_iocb - Get the next iocb in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held before a new
 * iocb is submitted to the firmware. This function checks
 * txq to flush the iocbs in txq to Firmware before
 * submitting new iocbs to the Firmware.
 * If there are iocbs in the txq which need to be submitted
 * to firmware, lpfc_sli_next_iocb returns the first element
 * of the txq after dequeuing it from txq.
 * If there is no iocb in the txq then the function will return
 * *piocb and *piocb is set to NULL. Caller needs to check
 * *piocb to find if there are more commands in the txq.
 **/
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq *nextiocb;

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}
/**
 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
 * this function allows only iocbs for posting buffers. This function finds
 * the next available slot in the command ring, posts the command to that
 * slot and writes the port attention register to request that the HBA start
 * processing the new iocb. If there is no slot available in the ring and
 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
 * the function returns IOCB_BUSY.
 *
 * This function is called with hbalock held. The function will return success
 * after it successfully submits the iocb to firmware or after adding it to
 * the txq.
 **/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];

	if (piocb->iocb_cmpl && (!piocb->vport) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR,
				LOG_SLI | LOG_VPORT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		return IOCB_ERROR;
	}

	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of an
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
			    (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
					FC_RCTL_DD_UNSOL_CMD) ||
			    (piocb->iocb.un.genreq64.w5.hcsw.Type !=
					MENLO_TRANSPORT_TYPE))
				goto iocb_busy;
			break;
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/* fall through */
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
/**
 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the IOCB
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the IOCB contains a BPL then the list of BDE's is
 * converted to sli4_sge's. If the IOCB contains a single
 * BDE then it is converted to a single sli_sge.
 * The IOCB is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 **/
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		  struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	IOCB_t *icmd;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */

	if (!piocbq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	icmd = &piocbq->iocb;
	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = icmd->un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		/* The addrHigh and addrLow fields within the IOCB
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		bpl = (struct ulp_bde64 *)
			((struct lpfc_dmabuf *)piocbq->context3)->virt;

		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			sgl->word2 = cpu_to_le32(sgl->word2);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				offset += bde.tus.f.bdeSize;
			}
			bpl++;
			sgl++;
		}
	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi = cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
		sgl->addr_lo = cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
	}
	return sglq->sli4_xritag;
}
/**
 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
 * @phba: Pointer to HBA context object.
 *
 * This routine performs a round-robin SCSI command to SLI4 FCP WQ index
 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
 * held.
 *
 * Return: index into SLI4 fast-path FCP queue index.
 **/
static uint32_t
lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
{
	++phba->fcp_qidx;
	if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
		phba->fcp_qidx = 0;

	return phba->fcp_qidx;
}
/**
 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to command iocb.
 * @wqe: Pointer to the work queue entry.
 *
 * This routine converts the iocb command to its Work Queue Entry
 * equivalent. The wqe pointer should not have any fields set when
 * this routine is called because it will memcpy over them.
 * This routine does not set the CQ_ID or the WQEC bits in the
 * wqe.
 *
 * Returns: 0 = Success, IOCB_ERROR = Failure.
 **/
static int
lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
		   union lpfc_wqe *wqe)
{
	uint32_t xmit_len = 0, total_len = 0;
	uint8_t ct = 0;
	uint32_t fip;
	uint32_t abort_tag;
	uint8_t command_type = ELS_COMMAND_NON_FIP;
	uint8_t cmnd;
	uint16_t xritag;
	uint16_t abrt_iotag;
	struct lpfc_iocbq *abrtiocbq;
	struct ulp_bde64 *bpl = NULL;
	uint32_t els_id = LPFC_ELS_ID_DEFAULT;
	int numBdes, i;
	struct ulp_bde64 bde;

	fip = phba->hba_flag & HBA_FIP_SUPPORT;
	/* The fcp commands will set command type */
	if (iocbq->iocb_flag & LPFC_IO_FCP)
		command_type = FCP_COMMAND;
	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
		command_type = ELS_COMMAND_FIP;
	else
		command_type = ELS_COMMAND_NON_FIP;

	/* Some of the fields are in the right position already */
	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
	abort_tag = (uint32_t) iocbq->iotag;
	xritag = iocbq->sli4_xritag;
	wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
	/* words0-2 bpl convert bde */
	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		bpl = (struct ulp_bde64 *)
			((struct lpfc_dmabuf *)iocbq->context3)->virt;
		if (!bpl)
			return IOCB_ERROR;

		/* Should already be byte swapped. */
		wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
		wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
		/* swap the size field back to the cpu so we
		 * can assign it to the sgl.
		 */
		wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
		xmit_len = wqe->generic.bde.tus.f.bdeSize;
		total_len = 0;
		for (i = 0; i < numBdes; i++) {
			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
			total_len += bde.tus.f.bdeSize;
		}
	} else
		xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;

	iocbq->iocb.ulpIoTag = iocbq->iotag;
	cmnd = iocbq->iocb.ulpCommand;
	switch (iocbq->iocb.ulpCommand) {
	case CMD_ELS_REQUEST64_CR:
		if (!iocbq->iocb.ulpLe) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2007 Only Limited Edition cmd Format"
				" supported 0x%x\n",
				iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}
		wqe->els_req.payload_len = xmit_len;
		/* Els_request64 has a TMO */
		bf_set(wqe_tmo, &wqe->els_req.wqe_com,
			iocbq->iocb.ulpTimeout);
		/* Need a VF for word 4 set the vf bit*/
		bf_set(els_req64_vf, &wqe->els_req, 0);
		/* And a VFID for word 12 */
		bf_set(els_req64_vfid, &wqe->els_req, 0);
		/*
		 * Set ct field to 3, indicates that the context_tag field
		 * contains the FCFI and remote N_Port_ID is
		 * in word 5.
		 */
		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
		bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
		       iocbq->iocb.ulpContext);
		bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
		bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
		/* CCP CCPE PV PRI in word10 were set in the memcpy */
		if (command_type == ELS_COMMAND_FIP) {
			els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
					>> LPFC_FIP_ELS_ID_SHIFT);
		}
		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->els_req.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
		break;
	case CMD_XMIT_SEQUENCE64_CX:
		bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
		       iocbq->iocb.un.ulpWord[3]);
		bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
		       iocbq->iocb.ulpContext);
		/* The entire sequence is transmitted for this IOCB */
		xmit_len = total_len;
		cmnd = CMD_XMIT_SEQUENCE64_CR;
		/* fall through */
	case CMD_XMIT_SEQUENCE64_CR:
		/* word3 iocb=io_tag32 wqe=reserved */
		wqe->xmit_sequence.rsvd3 = 0;
		/* word4 relative_offset memcpy */
		/* word5 r_ctl/df_ctl memcpy */
		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
		wqe->xmit_sequence.xmit_len = xmit_len;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_BCAST64_CN:
		/* word3 iocb=iotag32 wqe=seq_payload_len */
		wqe->xmit_bcast64.seq_payload_len = xmit_len;
		/* word4 iocb=rsvd wqe=rsvd */
		/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
		/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
		bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com,
		       LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
		break;
	case CMD_FCP_IWRITE64_CR:
		command_type = FCP_COMMAND_DATA_OUT;
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		wqe->fcp_iwrite.payload_offset_len =
			xmit_len + sizeof(struct fcp_rsp);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
		break;
	case CMD_FCP_IREAD64_CR:
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		wqe->fcp_iread.payload_offset_len =
			xmit_len + sizeof(struct fcp_rsp);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
		break;
	case CMD_FCP_ICMND64_CR:
		/* word3 iocb=IO_TAG wqe=reserved */
		wqe->fcp_icmd.rsrvd3 = 0;
		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
		/* Always open the exchange */
		bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
		break;
	case CMD_GEN_REQUEST64_CR:
		/* For this command calculate the xmit length of the
		 * request bde.
		 */
		xmit_len = 0;
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
			sizeof(struct ulp_bde64);
		for (i = 0; i < numBdes; i++) {
			if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64)
				break;
			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
			xmit_len += bde.tus.f.bdeSize;
		}
		/* word3 iocb=IO_TAG wqe=request_payload_len */
		wqe->gen_req.request_payload_len = xmit_len;
		/* word4 iocb=parameter wqe=relative_offset memcpy */
		/* word5 [rctl, type, df_ctl, la] copied in memcpy */
		/* word6 context tag copied in memcpy */
		if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2015 Invalid CT %x command 0x%x\n",
				ct, iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}
		bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
		bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
		bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_ELS_RSP64_CX:
		/* words0-2 BDE memcpy */
		/* word3 iocb=iotag32 wqe=response_payload_len */
		wqe->xmit_els_rsp.response_payload_len = xmit_len;
		/* word4 iocb=did wqe=rsvd. */
		wqe->xmit_els_rsp.rsvd4 = 0;
		/* word5 iocb=rsvd wqe=did */
		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
		       iocbq->iocb.un.elsreq64.remoteID);
		bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
		       iocbq->iocb.ulpContext);
		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
			       iocbq->vport->vpi + phba->vpi_base);
		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com,
		       LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
		command_type = OTHER_COMMAND;
		break;
	case CMD_CLOSE_XRI_CN:
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
		/* words 0-2 memcpy should be 0 reserved */
		/* port will send abts */
		abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
		if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
			abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
			fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
		} else
			fip = 0;

		if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
			/*
			 * The link is down, or the command was ELS_FIP
			 * so the fw does not need to send abts
			 * on the wire.
			 */
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
		else
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
		/* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
		wqe->abort_cmd.rsrvd5 = 0;
		bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
		/*
		 * The abort handler will send us CMD_ABORT_XRI_CN or
		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
		 */
		bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
		bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		cmnd = CMD_ABORT_XRI_CX;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_BLS_RSP64_CX:
		/* As BLS ABTS-ACC WQE is very different from other WQEs,
		 * we re-construct this WQE here based on information in
		 * iocbq from scratch.
		 */
		memset(wqe, 0, sizeof(union lpfc_wqe));
		/* OX_ID is invariable to who sent ABTS to CT exchange */
		bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc));
		if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) ==
		    LPFC_ABTS_UNSOL_INT) {
			/* ABTS sent by initiator to CT exchange, the
			 * RX_ID field will be filled with the newly
			 * allocated responder XRI.
			 */
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       iocbq->sli4_xritag);
		} else {
			/* ABTS sent by responder to CT exchange, the
			 * RX_ID field will be filled with the responder
			 * RX_ID from ABTS.
			 */
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc));
		}
		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
		       iocbq->iocb.ulpContext);
		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		/* Overwrite the pre-set command type with OTHER_COMMAND */
		command_type = OTHER_COMMAND;
		break;
	case CMD_XRI_ABORTED_CX:
	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
	case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
	case CMD_FCP_TRSP64_CX: /* Target mode rcv */
	case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2014 Invalid command 0x%x\n",
				iocbq->iocb.ulpCommand);
		return IOCB_ERROR;
	}

	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
	wqe->generic.wqe_com.abort_tag = abort_tag;
	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
	bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
	bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	return 0;
}
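/*
 * Note on bf_set()/bf_get() used throughout the routine above: they are
 * the driver's bitfield accessors. As a rough sketch of the idea, a line
 * such as
 *	bf_set(wqe_tmo, &wqe->els_req.wqe_com, iocbq->iocb.ulpTimeout);
 * performs a read-modify-write of the word holding the named field:
 *	word = (word & ~(mask << shift)) | ((value & mask) << shift);
 */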
/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-4 interface spec.
 *
 * This function is called with hbalock held. The function will return success
 * after it successfully submits the iocb to firmware or after adding it to
 * the txq.
 **/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;
	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];

	if (piocb->sli4_xritag == NO_XRI) {
		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
			sglq = NULL;
		else {
			if (pring->txq_cnt) {
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
						pring, piocb);
					return IOCB_SUCCESS;
				} else
					return IOCB_BUSY;
			} else {
				sglq = __lpfc_sli_get_sglq(phba);
				if (!sglq) {
					if (!(flag & SLI_IOCB_RET_IOCB)) {
						__lpfc_sli_ringtx_put(phba,
								pring,
								piocb);
						return IOCB_SUCCESS;
					} else
						return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->iocb_flag & LPFC_IO_FCP) {
		sglq = NULL; /* These IO's already have an XRI and
			      * a mapped sgl.
			      */
	} else {
		/* This is a continuation of a command (CX) so this
		 * sglq is on the active list
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		piocb->sli4_xritag = sglq->sli4_xritag;

		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
			return IOCB_ERROR;
	}

	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
		return IOCB_ERROR;

	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
		/*
		 * For FCP command IOCB, get a new WQ index to distribute
		 * WQE across the WQs. On the other hand, for abort IOCB,
		 * it carries the same WQ index to the original command
		 * IOCB.
		 */
		if (piocb->iocb_flag & LPFC_IO_FCP)
			piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
				     &wqe))
			return IOCB_ERROR;
	} else {
		if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			return IOCB_ERROR;
	}
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}
/**
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 *
 * This routine wraps the actual lockless version for issuing IOCB function
 * pointer from the lpfc_hba struct.
 *
 * Return codes:
 *	IOCB_ERROR - Error
 *	IOCB_SUCCESS - Success
 *	IOCB_BUSY - Busy
 **/
int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		      struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}
/**
 * lpfc_sli_api_table_setup - Set up sli api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SLI interface API function jump table in @phba
 * struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1419 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
	return 0;
}
/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
 * function. This function gets the hbalock and calls
 * __lpfc_sli_issue_iocb function and will return the error returned
 * by __lpfc_sli_issue_iocb function. This wrapper is used by
 * functions which do not hold hbalock.
 **/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return rc;
}
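/*
 * Illustrative usage (a sketch; actual callers in the ELS code differ in
 * detail): a path that does not hold the hbalock issues an ELS iocb and
 * cleans up if the ring rejects it:
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR)
 *		lpfc_els_free_iocb(phba, elsiocb);
 */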
/**
 * lpfc_extra_ring_setup - Extra ring setup function
 * @phba: Pointer to HBA context object.
 *
 * This function is called while driver attaches with the
 * HBA to setup the extra ring. The extra ring is used
 * only when driver needs to support target mode functionality
 * or IP over FC functionalities.
 *
 * This function is called with no lock held.
 **/
static int
lpfc_extra_ring_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;

	/* Adjust cmd/rsp ring iocb entries more evenly */

	/* Take some away from the FCP ring */
	pring = &psli->ring[psli->fcp_ring];
	pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* and give them to the extra ring */
	pring = &psli->ring[psli->extra_ring];

	pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* Setup default profile for this ring */
	pring->iotag_max = 4096;
	pring->num_mask = 1;
	pring->prt[0].profile = 0;	/* Mask 0 */
	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
	pring->prt[0].type = phba->cfg_multi_ring_type;
	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
	return 0;
}
/**
 * lpfc_sli_async_event_handler - ASYNC iocb handler function
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to iocb object.
 *
 * This function is called by the slow ring event handler
 * function when there is an ASYNC event iocb in the ring.
 * This function is called with no lock held.
 * Currently this function handles only temperature related
 * ASYNC events. The function decodes the temperature sensor
 * event message and posts events for the management applications.
 **/
static void
lpfc_sli_async_event_handler(struct lpfc_hba *phba,
	struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
{
	IOCB_t *icmd;
	uint16_t evt_code;
	uint16_t temp;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;
	uint32_t *iocb_w;

	icmd = &iocbq->iocb;
	evt_code = icmd->un.asyncstat.evt_code;
	temp = icmd->ulpContext;

	if ((evt_code != ASYNC_TEMP_WARN) &&
	    (evt_code != ASYNC_TEMP_SAFE)) {
		iocb_w = (uint32_t *) icmd;
		lpfc_printf_log(phba,
			KERN_ERR,
			LOG_SLI,
			"0346 Ring %d handler: unexpected ASYNC_STATUS"
			" evt_code 0x%x\n"
			"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
			"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
			"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
			pring->ringno,
			icmd->un.asyncstat.evt_code,
			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
		return;
	}
	temp_event_data.data = (uint32_t)temp;
	temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
	if (evt_code == ASYNC_TEMP_WARN) {
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_TEMP,
				"0347 Adapter is very hot, please take "
				"corrective action. temperature : %d Celsius\n",
				temp);
	}
	if (evt_code == ASYNC_TEMP_SAFE) {
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_TEMP,
				"0340 Adapter temperature is OK now. "
				"temperature : %d Celsius\n",
				temp);
	}

	/* Send temperature change event to applications */
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(temp_event_data), (char *) &temp_event_data,
		LPFC_NL_VENDOR_ID);
}
/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_setup sets up rings of the SLI interface with
 * number of iocbs per ring and iotags. This function is
 * called while driver attaches to the HBA and before the
 * interrupts are enabled. So there is no need for locking.
 *
 * This function always returns 0.
 **/
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_CONFIGURED_RINGS;
	psli->sli_flag = 0;
	psli->fcp_ring = LPFC_FCP_RING;
	psli->next_ring = LPFC_FCP_NEXT_RING;
	psli->extra_ring = LPFC_EXTRA_RING;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			/* abort unsolicited sequence */
			pring->prt[4].profile = 0;	/* Mask 4 */
			pring->prt[4].rctl = FC_RCTL_BA_ABTS;
			pring->prt[4].type = FC_TYPE_BLS;
			pring->prt[4].lpfc_sli_rcv_unsol_event =
			    lpfc_sli4_ct_abort_unsol_event;
			break;
		}
		totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
				(pring->numRiocb * pring->sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}
/**
 * lpfc_sli_queue_setup - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held and always returns
 * 1.
 **/
int
lpfc_sli_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->ringno = i;
		pring->next_cmdidx = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
		INIT_LIST_HEAD(&pring->postbufq);
	}
	spin_unlock_irq(&phba->hbalock);
	return 1;
}
/**
 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * This routine flushes the mailbox command subsystem. It will unconditionally
 * flush all the mailbox commands in the three possible stages in the mailbox
 * command sub-system: pending mailbox command queue; the outstanding mailbox
 * command; and completed mailbox command queue. It is caller's responsibility
 * to make sure that the driver is in the proper state to flush the mailbox
 * command sub-system. Namely, the posting of mailbox commands into the
 * pending mailbox command queue from the various clients must be stopped;
 * either the HBA is in a state that it will never work on the outstanding
 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
 * mailbox command has been completed.
 **/
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* Flush all the mailbox commands in the mbox system */
	spin_lock_irqsave(&phba->hbalock, iflag);
	/* The pending mailbox command queue */
	list_splice_init(&phba->sli.mboxq, &completions);
	/* The outstanding active mailbox command */
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
	/* The completed mailbox command queue */
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}
/**
 * lpfc_sli_host_down - Vport cleanup function
 * @vport: Pointer to virtual port object.
 *
 * lpfc_sli_host_down is called to clean up the resources
 * associated with a vport before destroying virtual
 * port data structures.
 * This function does following operations:
 * - Free discovery resources associated with this virtual
 *   port.
 * - Free iocbs associated with this virtual port in
 *   the txq.
 * - Send abort for all iocb commands associated with this
 *   vport in txcmplq.
 *
 * This function is called with no lock held and always returns 1.
 **/
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;

	lpfc_cleanup_discovery_resources(vport);

	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		prev_pring_flag = pring->flag;
		/* Only slow rings */
		if (pring->ringno == LPFC_ELS_RING) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		}
		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
			if (iocb->vport != vport)
				continue;
			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}

		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
					 list) {
			if (iocb->vport != vport)
				continue;
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}

		pring->flag = prev_pring_flag;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
	return 1;
}
/**
 * lpfc_sli_hba_down - Resource cleanup function for the HBA
 * @phba: Pointer to HBA context object.
 *
 * This function cleans up all iocb, buffers, mailbox commands
 * while shutting down the HBA. This function is called with no
 * lock held and always returns 1.
 * This function does the following to cleanup driver resources:
 * - Free discovery resources for each virtual port
 * - Cleanup any pending fabric iocbs
 * - Iterate through the iocb txq and free each entry
 *   in the list.
 * - Free up any buffer posted to the HBA
 * - Free mailbox commands in the mailbox queue.
 **/
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *buf_ptr;
	unsigned long flags = 0;
	int i;

	/* Shutdown the mailbox command sub-system */
	lpfc_sli_mbox_sys_shutdown(phba);

	lpfc_hba_down_prep(phba);

	lpfc_fabric_abort_hba(phba);

	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		/* Only slow rings */
		if (pring->ringno == LPFC_ELS_RING) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		}

		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	spin_lock_irqsave(&phba->hbalock, flags);
	list_splice_init(&phba->elsbuf, &completions);
	phba->elsbuf_cnt = 0;
	phba->elsbuf_prev_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, buf_ptr,
				 struct lpfc_dmabuf, list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	return 1;
}
/**
 * lpfc_sli_pcimem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes required to be copied.
 *
 * This function is used for copying data between driver memory
 * and the SLI memory. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
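/*
 * Worked illustration (values illustrative): copying the two words
 * { 0x11223344, 0xaabbccdd } with cnt == 8 moves exactly 8 bytes; on a
 * big-endian host each word is byte-swapped by le32_to_cpu(), while on a
 * little-endian host the values pass through unchanged.
 */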
/**
 * lpfc_sli_bemem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes required to be copied.
 *
 * This function is used for copying data between a data structure
 * with big endian representation to local endianness.
 * This function can be called with or without lock.
 **/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = be32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
 * buffer list.
 **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
	   later */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}
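/*
 * Typical pairing (an illustrative sketch): a buffer posted here with a
 * driver buffer tag is later reclaimed by tag when the HBA returns it:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 */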
/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
 * does not conflict with tags of buffer posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no locks held.
 **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish between
	 * a tag assigned by HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}
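/*
 * Worked example (values illustrative): with QUE_BUFTAG_BIT or-ed into
 * every tag, a counter value of 5 yields tag (5 | QUE_BUFTAG_BIT), so a
 * driver-assigned buffer tag can never collide with an HBQ-assigned tag,
 * which does not carry that bit.
 */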
/**
 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
 * iocb is posted to the response ring with the tag of the buffer.
 * This function searches the pring->postbufq list using the tag
 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found then lpfc_dmabuf object of the
 * buffer is returned to the caller else NULL is returned.
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    uint32_t tag)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on tag */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->buffer_tag == tag) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0402 Cannot find virtual addr for buffer tag on "
			"ring %d Data x%lx x%p x%p x%x\n",
			pring->ringno, (unsigned long) tag,
			slp->next, slp->prev, pring->postbufq_cnt);

	return NULL;
}
/**
 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * This function searches the buffer list using the dma_address
 * of unsolicited event to find the driver's lpfc_dmabuf object
 * corresponding to the dma_address. The function returns the
 * lpfc_dmabuf object if a buffer is found else it returns NULL.
 * This function is called by the ct and els unsolicited event
 * handlers to get the buffer associated with the unsolicited
 * event.
 *
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on phys */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%p x%p x%x\n",
			pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}
/**
 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This function is the completion handler for the abort iocbs for
 * ELS commands. This function is called from the ELS ring event
 * handler with no lock held. This function frees memory resources
 * associated with the abort iocb.
 **/
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;
	uint16_t abort_iotag, abort_context;
	struct lpfc_iocbq *abort_iocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	abort_iocb = NULL;

	if (irsp->ulpStatus) {
		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;

		spin_lock_irq(&phba->hbalock);
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (abort_iotag != 0 &&
			    abort_iotag <= phba->sli.last_iotag)
				abort_iocb =
					phba->sli.iocbq_lookup[abort_iotag];
		} else
			/* For sli4 the abort_tag is the XRI,
			 * so the abort routine puts the iotag of the iocb
			 * being aborted in the context field of the abort
			 * IOCB.
			 */
			abort_iocb = phba->sli.iocbq_lookup[abort_context];

		/*
		 * If the iocb is not found in Firmware queue the iocb
		 * might have completed already. Do not free it again.
		 */
		if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli_release_iocbq(phba, cmdiocb);
				return;
			}
			/* For SLI4 the ulpContext field for abort IOCB
			 * holds the iotag of the IOCB being aborted so
			 * the local abort_context needs to be reset to
			 * match the aborted IOCBs ulpContext.
			 */
			if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
				abort_context = abort_iocb->iocb.ulpContext;
		}

		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
				"0327 Cannot abort els iocb %p "
				"with tag %x context %x, abort status %x, "
				"abort code %x\n",
				abort_iocb, abort_iotag, abort_context,
				irsp->ulpStatus, irsp->un.ulpWord[4]);
		/*
		 * make sure we have the right iocbq before taking it
		 * off the txcmplq and try to call completion routine.
		 */
		if (!abort_iocb ||
		    abort_iocb->iocb.ulpContext != abort_context ||
		    (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
			spin_unlock_irq(&phba->hbalock);
		else if (phba->sli_rev < LPFC_SLI_REV4) {
			/*
			 * leave the SLI4 aborted command on the txcmplq
			 * list and the command complete WCQE's XB bit
			 * will tell whether the SGL (XRI) can be released
			 * immediately or to the aborted SGL list for the
			 * following abort XRI from the HBA.
			 */
			list_del_init(&abort_iocb->list);
			if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) {
				abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
				pring->txcmplq_cnt--;
			}

			/* Firmware could still be in progress of DMAing
			 * payload, so don't free data buffer till after
			 * a hbeat.
			 */
			abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
			abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
			spin_unlock_irq(&phba->hbalock);

			abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
			abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
			(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
		} else
			spin_unlock_irq(&phba->hbalock);
	}

	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}
/**
 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for ELS commands
 * which are aborted. The function frees memory resources used for
 * the aborted ELS commands.
 **/
static void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;

	/* ELS cmd tag <ulpIoTag> completes */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0139 Ignoring ELS cmd tag x%x completion Data: "
			"x%x x%x x%x\n",
			irsp->ulpIoTag, irsp->ulpStatus,
			irsp->un.ulpWord[4], irsp->ulpTimeout);
	if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
		lpfc_ct_free_iocb(phba, cmdiocb);
	else
		lpfc_els_free_iocb(phba, cmdiocb);
	return;
}
/**
 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb down to
 * the port. Other than the case the outstanding command iocb is an abort
 * request, this function issues abort out unconditionally. This function is
 * called with hbalock held. The function returns 0 when it fails due to
 * memory allocation failure or when the command iocb is an abort request.
 **/
static int
lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_iocbq *abtsiocbp;
	IOCB_t *icmd = NULL;
	IOCB_t *iabt = NULL;
	int retval;

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/* issue ABTS for this IOCB based on iotag */
	abtsiocbp = __lpfc_sli_get_iocbq(phba);
	if (abtsiocbp == NULL)
		return 0;

	/* This signals the response to set the correct status
	 * before calling the completion handler
	 */
	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	iabt = &abtsiocbp->iocb;
	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
	iabt->un.acxri.abortContextTag = icmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
		iabt->un.acxri.abortContextTag = cmdiocb->iotag;
	} else
		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
	iabt->ulpLe = 1;
	iabt->ulpClass = icmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
		abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;

	if (phba->link_state >= LPFC_LINK_UP)
		iabt->ulpCommand = CMD_ABORT_XRI_CN;
	else
		iabt->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0339 Abort xri x%x, original iotag x%x, "
			 "abort cmd iotag x%x\n",
			 iabt->un.acxri.abortIoTag,
			 iabt->un.acxri.abortContextTag,
			 abtsiocbp->iotag);
	retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);

	if (retval)
		__lpfc_sli_release_iocbq(phba, abtsiocbp);

	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes
	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
	 */
	return retval;
}
/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb. In case
 * of unloading, the abort iocb will not be issued to commands on the ELS
 * ring. Instead, the callback function shall be changed for those commands
 * so that nothing happens when they finish. This function is called with
 * hbalock held. The function returns 0 when the command iocb is an abort
 * request.
 **/
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	int retval = IOCB_ERROR;
	IOCB_t *icmd = NULL;

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/*
	 * If we're unloading, don't abort iocb on the ELS ring, but change
	 * the callback so that nothing happens when it finishes.
	 */
	if ((vport->load_flag & FC_UNLOADING) &&
	    (pring->ringno == LPFC_ELS_RING)) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		goto abort_iotag_exit;
	}

	/* Now, we try to issue the abort to the cmdiocb out */
	retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);

abort_iotag_exit:
	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes
	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
	 */
	return retval;
}
/**
 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function aborts all iocbs in the given ring and frees all the iocb
 * objects in txq. This function issues abort iocbs unconditionally for all
 * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
 * to complete before the return of this function. The caller is not required
 * to hold any locks.
 **/
void
lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next_iocb;

	if (pring->ringno == LPFC_ELS_RING)
		lpfc_fabric_abort_hba(phba);

	spin_lock_irq(&phba->hbalock);

	/* Take off all the iocbs on txq for cancelling */
	list_splice_init(&pring->txq, &completions);
	pring->txq_cnt = 0;

	/* Next issue ABTS for everything on the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
		lpfc_sli_abort_iotag_issue(phba, pring, iocb);

	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
/**
 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will abort all pending and outstanding iocbs to an HBA.
 **/
void
lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	int i;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		lpfc_sli_iocb_ring_abort(phba, pring);
	}
}
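
/*
 * Illustrative sketch (not part of the driver): a hypothetical teardown
 * path flushing all rings before taking the HBA offline.  Assumes a valid
 * @phba and no locks held, as the routines above require.
 *
 *	lpfc_sli_hba_iocb_abort(phba);
 *	... then wait/poll for the txcmplq counts to drain, since the
 *	    aborted iocbs are not guaranteed to have completed yet ...
 */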
/**
 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
 *
 * This function acts as an iocb filter for functions which abort or count
 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
 * 0 if the filtering criteria is met for the given iocb and will return
 * 1 if the filtering criteria is not met.
 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
 * given iocb is for the SCSI device specified by vport, tgt_id and
 * lun_id parameter.
 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
 * given iocb is for the SCSI target specified by vport and tgt_id
 * parameters.
 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
 * given iocb is for the SCSI host associated with the given vport.
 * This function is called with no locks held.
 **/
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
			   uint16_t tgt_id, uint64_t lun_id,
			   lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_scsi_buf *lpfc_cmd;
	int rc = 1;

	if (!(iocbq->iocb_flag & LPFC_IO_FCP))
		return rc;

	if (iocbq->vport != vport)
		return rc;

	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);

	if (lpfc_cmd->pCmd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		if ((lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		if ((lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
		       __func__, ctx_cmd);
		break;
	}

	return rc;
}
/**
 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function returns number of FCP commands pending for the vport.
 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
 * commands pending on the vport associated with SCSI device specified
 * by tgt_id and lun_id parameters.
 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
 * commands pending on the vport associated with SCSI target specified
 * by tgt_id parameter.
 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
 * commands pending on the vport.
 * This function returns the number of iocbs which satisfy the filter.
 * This function is called without any lock held.
 **/
int
lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
		  lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	int sum, i;

	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
						ctx_cmd) == 0)
			sum++;
	}

	return sum;
}
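
/*
 * Illustrative sketch (not part of the driver): a hypothetical LUN-reset
 * handler could poll the counter above until the outstanding commands
 * drain (tgt_id/lun_id/deadline are made-up values):
 *
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) &&
 *	       time_before(jiffies, deadline))
 *		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
 */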
/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This function is called when an aborted FCP iocb completes. This
 * function is called by the ring event handler with no lock held.
 * This function frees the iocb.
 **/
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}
/**
 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by lpfc_sli_validate_fcp_iocb function.
 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with lun specified by tgt_id and lun_id
 * parameters.
 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with virtual port.
 * This function returns number of iocbs it failed to abort.
 * This function is called with no locks held.
 **/
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *abtsiocb;
	IOCB_t *cmd = NULL;
	int errcnt = 0, ret_val = 0;
	int i;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       abort_cmd) != 0)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocb = lpfc_sli_get_iocbq(phba);
		if (abtsiocb == NULL) {
			errcnt++;
			continue;
		}

		cmd = &iocbq->iocb;
		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
		else
			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;
		abtsiocb->vport = phba->pport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;

		if (lpfc_is_link_up(phba))
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
					      abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			lpfc_sli_release_iocbq(phba, abtsiocb);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}
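
/*
 * Illustrative sketch (not part of the driver): a hypothetical target
 * reset path aborting everything queued to one SCSI target and checking
 * how many aborts could not be issued (tgt_id is a made-up value):
 *
 *	int failed = lpfc_sli_abort_iocb(vport,
 *				&phba->sli.ring[LPFC_FCP_RING],
 *				tgt_id, 0, LPFC_CTX_TGT);
 *	if (failed)
 *		... escalate: 'failed' aborts never reached the HBA ...
 */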
/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_sli_issue_iocb_wait function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from other thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;
	struct lpfc_scsi_buf *lpfc_cmd;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	/* Set the exchange busy flag for task management commands */
	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
		!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
			cur_iocbq);
		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
	}

	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}
/**
 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @flag: Flag to test.
 *
 * This routine grabs the hbalock and then tests the iocb_flag to
 * see if the passed in flag is set.
 * Returns:
 * 1 if flag is set.
 * 0 if flag is not set.
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
		  struct lpfc_iocbq *piocbq, uint32_t flag)
{
	unsigned long iflags;
	int ret;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = piocbq->iocb_flag & flag;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}
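
/*
 * Illustrative note (not part of the driver): this helper exists so that
 * wait_event_timeout() can test LPFC_IO_WAKE under the hbalock, which
 * serializes the test against lpfc_sli_wake_iocb_wait() setting the flag:
 *
 *	wait_event_timeout(done_q,
 *			   lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
 *			   timeout_req);
 */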
/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. If the iocb command is not
 * completed within timeout seconds, it returns IOCB_TIMEDOUT.
 * Caller should not free the iocb resources if this function
 * returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupt disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completion for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS when success.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * is NULL or its an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~LPFC_IO_WAKE;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
				     SLI_IOCB_RET_IOCB);
	if (retval == IOCB_SUCCESS) {
		timeout_req = timeout * HZ;
		timeleft = wait_event_timeout(done_q,
				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
				timeout_req);

		if (piocb->iocb_flag & LPFC_IO_WAKE) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"0331 IOCB wake signaled\n");
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0338 IOCB wait timeout error - no "
					"wake response Data x%x\n", timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0330 IOCB wake NOT set, "
					"Data x%x x%lx\n",
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else if (retval == IOCB_BUSY) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
			phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
		return retval;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}
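
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * issuing an ELS iocb synchronously with a 30 second timeout.  The caller
 * owns piocb/prsp; on IOCB_TIMEDOUT the resources must NOT be freed, per
 * the kernel-doc above.
 *
 *	retval = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb,
 *					  prsp, 30);
 *	if (retval == IOCB_SUCCESS)
 *		... prsp->iocb now holds the response ...
 *	else if (retval != IOCB_TIMEDOUT)
 *		... safe to release piocb/prsp ...
 */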
/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox to firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within timeout seconds, it returns MBX_TIMEOUT.
 * The function waits for the mailbox completion using an
 * interruptible wait. If the thread is woken up due to a
 * signal, MBX_TIMEOUT error is returned to the caller. Caller
 * should not free the mailbox resources, if this function returns
 * MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupt disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps. So, this function cannot be called from
 * the worker thread which processes mailbox completion.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	int retval;
	unsigned long flag;

	/* The caller must leave context1 empty. */
	if (pmboxq->context1)
		return MBX_NOT_FINISHED;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = &done_q;

	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_event_interruptible_timeout(done_q,
				pmboxq->mbox_flag & LPFC_MBX_WAKE,
				timeout * HZ);

		spin_lock_irqsave(&phba->hbalock, flag);
		pmboxq->context1 = NULL;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
			lpfc_sli4_swap_str(phba, pmboxq);
		} else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	}

	return retval;
}
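
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * running a mailbox command synchronously.  Assumes pmboxq came from the
 * driver's mailbox mempool with context1 left NULL; LPFC_MBOX_TMO is the
 * driver's standard mailbox timeout.
 *
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 *	if (rc == MBX_TIMEOUT)
 *		... do not free pmboxq; the completion handler will ...
 *	else
 *		... inspect pmboxq->u.mb.mbxStatus, then free as usual ...
 */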
/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 *
 * This function is called to shutdown the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as being in a blocked state to
 * prevent asynchronous mailbox commands from being issued off the pending
 * mailbox command queue. If the mailbox command sub-system shutdown is due
 * to HBA error conditions such as EEH or ERATT, this routine shall invoke
 * the mailbox sub-system flush routine to forcefully bring down the
 * mailbox sub-system. Otherwise, if it is due to normal condition (such
 * as with offline or HBA function reset), this routine will wait for the
 * outstanding mailbox command to complete before invoking the mailbox
 * sub-system flush routine to gracefully bring down mailbox sub-system.
 **/
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		if (phba->sli.mbox_active)
			actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		spin_unlock_irq(&phba->hbalock);
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
					   1000) + jiffies;
		while (phba->sli.mbox_active) {
			/* Check active mailbox complete status every 2ms */
			msleep(2);
			if (time_after(jiffies, timeout))
				/* Timeout, let the mailbox flush routine
				 * forcefully release the active mailbox
				 * command.
				 */
				break;
		}
	}
	lpfc_sli_mbox_sys_flush(phba);
}
/**
 * lpfc_sli_eratt_read - read sli-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI3 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host
 * Attention Register and returns 0 otherwise.
 **/
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* Read chip Host Attention (HA) register */
	ha_copy = readl(phba->HAregaddr);
	if (ha_copy & HA_ERATT) {
		/* Read host status register to retrieve error event */
		lpfc_sli_read_hs(phba);

		/* Check if a deferred error condition is active */
		if ((HS_FFER1 & phba->work_hs) &&
		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
			phba->hba_flag |= DEFER_ERATT;
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr);
		}

		/* Set the driver HA work bitmap */
		phba->work_ha |= HA_ERATT;
		/* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;
}
/**
 * lpfc_sli4_eratt_read - read sli-4 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI4 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host
 * Attention Register and returns 0 otherwise.
 **/
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
	uint32_t uerr_sta_hi, uerr_sta_lo;

	/* For now, use the SLI4 device internal unrecoverable error
	 * registers for error attention. This can be changed later.
	 */
	uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
	uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
	if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
	    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1423 HBA Unrecoverable error: "
				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
				"ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
				uerr_sta_lo, uerr_sta_hi,
				phba->sli4_hba.ue_mask_lo,
				phba->sli4_hba.ue_mask_hi);
		phba->work_status[0] = uerr_sta_lo;
		phba->work_status[1] = uerr_sta_hi;
		/* Set the driver HA work bitmap */
		phba->work_ha |= HA_ERATT;
		/* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;
}
/**
 * lpfc_sli_check_eratt - check error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called from timer soft interrupt context to check HBA's
 * error attention register bit for error attention events.
 *
 * This function returns 1 when there is Error Attention in the Host
 * Attention Register and returns 0 otherwise.
 **/
int
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		return 0;

	/* Check if interrupt handler handles this ERATT */
	spin_lock_irq(&phba->hbalock);
	if (phba->hba_flag & HBA_ERATT_HANDLED) {
		/* Interrupt handler has handled ERATT */
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/*
	 * If there is deferred error attention, do not check for error
	 * attention
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/* If PCI channel is offline, don't process it */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	switch (phba->sli_rev) {
	case LPFC_SLI_REV2:
	case LPFC_SLI_REV3:
		/* Read chip Host Attention (HA) register */
		ha_copy = lpfc_sli_eratt_read(phba);
		break;
	case LPFC_SLI_REV4:
		/* Read device Unrecoverable Error (UERR) registers */
		ha_copy = lpfc_sli4_eratt_read(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0299 Invalid SLI revision (%d)\n",
				phba->sli_rev);
		ha_copy = 0;
		break;
	}
	spin_unlock_irq(&phba->hbalock);

	return ha_copy;
}
/**
 * lpfc_intr_state_check - Check device state for interrupt handling
 * @phba: Pointer to HBA context.
 *
 * This inline routine checks whether a device or its PCI slot is in a state
 * that the interrupt should be handled.
 *
 * This function returns 0 if the device or the PCI slot is in a state that
 * interrupt should be handled, otherwise -EIO.
 **/
static inline int
lpfc_intr_state_check(struct lpfc_hba *phba)
{
	/* If the pci channel is offline, ignore all the interrupts */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return -EIO;

	/* Update device level interrupt statistics */
	phba->sli.slistat.sli_intr++;

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return -EIO;

	return 0;
}
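
/*
 * Illustrative note (not part of the driver): every interrupt entry point
 * below is expected to bail out early using this check, i.e.:
 *
 *	if (lpfc_intr_state_check(phba))
 *		return IRQ_NONE;
 */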
/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there are slow-path events in
 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
 * interrupt mode, this function is called as part of the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA
 * is undergoing initialization, the interrupt handler will not process
 * the interrupt. The link attention and ELS ring attention events are
 * handled by the worker thread. The interrupt handler signals the worker
 * thread and returns for these events. This function is called without
 * any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba  *phba;
	uint32_t ha_copy, hc_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	unsigned long iflag;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for slow-path events */
		spin_lock_irqsave(&phba->hbalock, iflag);
		ha_copy = readl(phba->HAregaddr);
		/* If somebody is waiting to handle an eratt don't process it
		 * here. The brdkill function will do this.
		 */
		if (phba->link_flag & LS_IGNORE_ERATT)
			ha_copy &= ~HA_ERATT;
		/* Check the need for handling ERATT in interrupt handler */
		if (ha_copy & HA_ERATT) {
			if (phba->hba_flag & HBA_ERATT_HANDLED)
				/* ERATT polling has handled ERATT */
				ha_copy &= ~HA_ERATT;
			else
				/* Indicate interrupt handler handles ERATT */
				phba->hba_flag |= HBA_ERATT_HANDLED;
		}

		/*
		 * If there is deferred error attention, do not check for any
		 * interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}

		/* Clear up only attention source related to slow-path */
		hc_copy = readl(phba->HCregaddr);
		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
			HC_LAINT_ENA | HC_ERINT_ENA),
			phba->HCregaddr);
		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
			phba->HAregaddr);
		writel(hc_copy, phba->HCregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (work_ha_copy) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock_irqsave(&phba->hbalock, iflag);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				control = readl(phba->HCregaddr);
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
			else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			/*
			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
			 * the only slow ring.
			 */
			status = (work_ha_copy &
				(HA_RXMASK  << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock_irqsave(&phba->hbalock, iflag);
				control = readl(phba->HCregaddr);

				lpfc_debugfs_slow_ring_trc(phba,
				"ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
				control, status,
				(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));

					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				}
				else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring:   pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));
				}
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
		}
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (work_ha_copy & HA_ERATT) {
			lpfc_sli_read_hs(phba);
			/*
			 * Check if a deferred error condition
			 * is active
			 */
			if ((HS_FFER1 & phba->work_hs) &&
				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
				  phba->work_hs)) {
				phba->hba_flag |= DEFER_ERATT;
				/* Clear all interrupt enable conditions */
				writel(0, phba->HCregaddr);
				readl(phba->HCregaddr);
			}
		}

		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->u.mb;
			mbox = phba->mbox;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* clear mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							MAILBOX_CMD_SIZE);
					if (pmb->out_ext_byte_len &&
						pmb->context2)
						lpfc_sli_pcimem_bcopy(
						phba->mbox_ext,
						pmb->context2,
						pmb->out_ext_byte_len);
				}
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: : "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							(pmb->context1);
						ndlp = (struct lpfc_nodelist *)
							pmb->context2;

						/* Reg_LOGIN of dflt RPI was
						 * successful. Now let's get
						 * rid of the RPI using the
						 * same mbox buffer.
						 */
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->context1 = mp;
						pmb->context2 = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_MBOX | LOG_SLI,
							"0350 rc should have "
							"been MBX_BUSY\n");
						if (rc != MBX_NOT_FINISHED)
							goto send_current_mbox;
					}
				}
				spin_lock_irqsave(
						&phba->pport->work_port_lock,
						iflag);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock_irqrestore(
						&phba->pport->work_port_lock,
						iflag);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock_irqrestore(&phba->hbalock, iflag);

		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							 MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0349 rc should be "
						"MBX_SUCCESS\n");
		}

		spin_lock_irqsave(&phba->hbalock, iflag);
		phba->work_ha |= work_ha_copy;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_worker_wake_up(phba);
	}
	return IRQ_HANDLED;

} /* lpfc_sli_sp_intr_handler */
/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba  *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		ha_copy = readl(phba->HAregaddr);
		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba,
						&phba->sli.ring[LPFC_FCP_RING],
						status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
}  /* lpfc_sli_fp_intr_handler */
/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the HBA device-level interrupt handler to device with
 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba  *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	spin_lock(&phba->hbalock);
	phba->ha_copy = readl(phba->HAregaddr);
	if (unlikely(!phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any
	 * interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions */
	hc_copy = readl(phba->HCregaddr);
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
}  /* lpfc_sli_intr_handler */
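
/*
 * Illustrative sketch (not part of the code shown here): in INTx/MSI mode
 * the device-level handler above is the one wired up at interrupt request
 * time, roughly:
 *
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
 *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 *
 * while in MSI-X mode lpfc_sli_sp_intr_handler and lpfc_sli_fp_intr_handler
 * are registered on separate vectors.
 */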
/**
 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 FCP abort XRI events.
 **/
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the fcp xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the fcp xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for FCP work queue */
		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 els abort xri events.
 **/
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the els xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the els xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: pointer to lpfc hba data structure
 * @pIocbIn: pointer to the rspiocbq
 * @pIocbOut: pointer to the cmdiocbq
 * @wcqe: pointer to the complete wcqe
 *
 * This routine transfers the fields of a command iocbq to a response iocbq
 * by copying all the IOCB fields from command iocbq and transferring the
 * completion status information from the complete wcqe.
 **/
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
			      struct lpfc_iocbq *pIocbIn,
			      struct lpfc_iocbq *pIocbOut,
			      struct lpfc_wcqe_complete *wcqe)
{
	unsigned long iflags;
	size_t offset = offsetof(struct lpfc_iocbq, iocb);

	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
	       sizeof(struct lpfc_iocbq) - offset);
	/* Map WCQE parameters into irspiocb parameters */
	pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
			pIocbIn->iocb.un.fcpi.fcpi_parm =
					pIocbOut->iocb.un.fcpi.fcpi_parm -
					wcqe->total_data_placed;
		else
			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
	else {
		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
		pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
	}

	/* Pick up HBA exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}
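
/*
 * Illustrative note (not part of the driver): after the transfer above, a
 * successful WCQE yields a pseudo response iocb whose ulpStatus is 0 and
 * whose un.ulpWord[4] carries the WCQE parameter, so the existing SLI-3
 * style completion handlers can consume it unchanged.
 */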
/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise.
 **/
static struct lpfc_iocbq *
lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
			       struct lpfc_iocbq *irspiocbq)
{
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_wcqe_complete *wcqe;
	unsigned long iflags;

	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
	spin_lock_irqsave(&phba->hbalock, iflags);
	pring->stats.iocb_event++;
	/* Look up the ELS command IOCB and create pseudo response IOCB */
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0386 ELS complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		lpfc_sli_release_iocbq(phba, irspiocbq);
		return NULL;
	}

	/* Fake the irspiocbq and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);

	return irspiocbq;
}
/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with an
 * asynchronous event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0392 Async Event: word0:x%x, word1:x%x, "
			"word2:x%x, word3:x%x\n", mcqe->word0,
			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0394 Failed to allocate CQ_EVENT entry\n");
		return false;
	}

	/* Move the CQE into an asynchronous event entry */
	memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	/* Set the async event flag */
	phba->hba_flag |= ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}
/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with a mailbox
 * completion.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, out by checking mailbox consume */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
	/* Set the mailbox status with SLI4 range 0x4000 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS)
		bf_set(lpfc_mqe_status, mqe,
		       (LPFC_MBX_ERROR_RANGE | mcqe_status));

	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x",
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			mp = (struct lpfc_dmabuf *)(pmb->context1);
			ndlp = (struct lpfc_nodelist *)pmb->context2;
			/* Reg_LOGIN of dflt RPI was successful. Now let's
			 * get rid of the RPI using the same mbox buffer.
			 */
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->context1 = mp;
			pmb->context2 = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0385 rc should "
						"have been MBX_BUSY\n");
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	/* There is mailbox completion work to do */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting active mailbox pointer need to be in sync to flag clear */
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
out_no_mqe_complete:
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry; it invokes the
 * proper mailbox complete handling or asynchronous event handling routine
 * according to the MCQE's async bit.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
{
	struct lpfc_mcqe mcqe;
	bool workposted;

	/* Copy the mailbox MCQE and convert endian order as needed */
	lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));

	/* Invoke the proper event handling routine */
	if (!bf_get(lpfc_trailer_async, &mcqe))
		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
	else
		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_iocbq *irspiocbq;
	unsigned long iflags;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];

	/* Get an irspiocbq for later ELS response processing use */
	irspiocbq = lpfc_sli_get_iocbq(phba);
	if (!irspiocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
			pring->txq_cnt, phba->iocb_cnt,
			phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
			phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
		return false;
	}

	/* Save off the slow-path queue event for work thread to process */
	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&irspiocbq->cq_event.list,
		      &phba->sli4_hba.sp_queue_event);
	phba->hba_flag |= HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}
/**
 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a slow-path WQ entry consumed event by invoking the
 * proper WQ release routine to the slow-path WQ.
 **/
static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_release *wcqe)
{
	/* Check for the slow-path ELS work queue */
	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
	else
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2579 Slow-path wqe consume event carries "
				"mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
				phba->sli4_hba.els_wq->queue_id);
}
/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to a WQ completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an XRI abort event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq,
				   struct sli4_wcqe_xri_aborted *wcqe)
{
	bool workposted = false;
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0602 Failed to allocate CQ_EVENT entry\n");
		return false;
	}

	/* Move the CQE into the proper xri abort event list */
	memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
	switch (cq->subtype) {
	case LPFC_FCP:
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
		/* Set the fcp xri abort event flag */
		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case LPFC_ELS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
		/* Set the els xri abort event flag */
		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0603 Invalid work queue CQE subtype (x%x)\n",
				cq->subtype);
		workposted = false;
		break;
	}
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct hbq_dmabuf *dma_buf;
	uint32_t status;
	unsigned long iflags;

	if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2537 Receive Frame Truncated!!\n");
		/* fall through: still consume the RQ entry */
	case FC_STATUS_RQ_SUCCESS:
		lpfc_sli4_rq_release(hrq, drq);
		spin_lock_irqsave(&phba->hbalock, iflags);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
		/* save off the frame for the worker thread to process */
		list_add_tail(&dma_buf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		/* Frame received */
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to a completion queue entry.
 *
 * This routine processes a slow-path work-queue or receive-queue completion
 * queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_cqe cqevt;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_cqe_code, &cqevt)) {
	case CQE_CODE_COMPL_WQE:
		/* Process the WQ/RQ complete event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
				(struct lpfc_wcqe_complete *)&cqevt);
		break;
	case CQE_CODE_RELEASE_WQE:
		/* Process the WQ release event */
		lpfc_sli4_sp_handle_rel_wcqe(phba,
				(struct lpfc_wcqe_release *)&cqevt);
		break;
	case CQE_CODE_XRI_ABORTED:
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&cqevt);
		break;
	case CQE_CODE_RECEIVE:
		/* Process the RQ event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_rcqe(phba,
				(struct lpfc_rcqe *)&cqevt);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0388 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_cqe_code, &cqevt));
		break;
	}
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 *
 * This routine processes an event queue entry from the slow-path event
 * queue. It will check the MajorCode and MinorCode to determine this is for
 * a completion event on a completion queue, if not, an error shall be logged
 * and just return. Otherwise, it will get to the corresponding completion
 * queue and process all the entries on that completion queue, rearm the
 * completion queue, and then return.
 *
 **/
static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
{
	struct lpfc_queue *cq = NULL, *childq, *speq;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int ecount = 0;
	uint16_t cqid;

	if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0359 Not a valid slow-path completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Search for completion queue pointer matching this cqid */
	speq = phba->sli4_hba.sp_eq;
	list_for_each_entry(childq, &speq->child_list, list) {
		if (childq->queue_id == cqid) {
			cq = childq;
			break;
		}
	}
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0365 Slow-path CQ identifier "
					"(%d) does not exist\n", cqid);
		return;
	}

	/* Process all the entries to the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
			if (!(++ecount % LPFC_GET_QE_REL_INT))
				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
		}
		break;
	case LPFC_WCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
			if (!(++ecount % LPFC_GET_QE_REL_INT))
				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	/* Catch the no cq entry condition, log an error */
	if (unlikely(ecount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0371 No entry from the CQ: identifier "
				"(x%x), type (%d)\n", cq->queue_id, cq->type);

	/* In any case, flush and re-arm the CQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from the
 * fast-path event queue for FCP command response completion.
 **/
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq irspiocbq;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	pring->stats.iocb_event++;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce queue
		 * depth of the SCSI device.
		 */
		if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
		     IOSTAT_LOCAL_REJECT) &&
		    (wcqe->parameter == IOERR_NO_RESOURCES)) {
			phba->lpfc_rampdown_queue_depth(phba);
		}
		/* Log the error status */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0373 FCP complete error: status=x%x, "
				"hw_status=x%x, total_data_specified=%d, "
				"parameter=x%x, word3=x%x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				bf_get(lpfc_wcqe_c_hw_status, wcqe),
				wcqe->total_data_placed, wcqe->parameter,
				wcqe->word3);
	}

	/* Look up the FCP command IOCB and create pseudo response IOCB */
	spin_lock_irqsave(&phba->hbalock, iflags);
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
	if (unlikely(!cmdiocbq->iocb_cmpl)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	/* Fake the irspiocb and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Pass the cmd_iocb and the rsp state to the upper layer */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a fast-path WQ entry consumed event by invoking the
 * proper WQ release routine to the slow-path WQ.
 **/
static void
lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_release *wcqe)
{
	struct lpfc_queue *childwq;
	bool wqid_matched = false;
	uint16_t fcp_wqid;

	/* Check for fast-path FCP work queue release */
	fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
	list_for_each_entry(childwq, &cq->child_list, list) {
		if (childwq->queue_id == fcp_wqid) {
			lpfc_sli4_wq_release(childwq,
					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
			wqid_matched = true;
			break;
		}
	}
	/* Report warning log message if no match found */
	if (wqid_matched != true)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2580 Fast-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
}
/**
 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from the
 * fast-path event queue for FCP command response completion.
 **/
static bool
lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			 struct lpfc_cqe *cqe)
{
	struct lpfc_wcqe_release wcqe;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
	case CQE_CODE_COMPL_WQE:
		/* Process the WQ complete event */
		phba->last_completion_time = jiffies;
		lpfc_sli4_fp_handle_fcp_wcqe(phba,
				(struct lpfc_wcqe_complete *)&wcqe);
		break;
	case CQE_CODE_RELEASE_WQE:
		/* Process the WQ release event */
		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
				(struct lpfc_wcqe_release *)&wcqe);
		break;
	case CQE_CODE_XRI_ABORTED:
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&wcqe);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0144 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_wcqe_c_code, &wcqe));
		break;
	}
	return workposted;
}
/**
 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @fcp_cqidx: Index to the fast-path CQ to process.
 *
 * This routine processes an event queue entry from the fast-path event queue.
 * It will check the MajorCode and MinorCode to determine whether this is a
 * completion event on a completion queue; if not, an error shall be logged
 * and the routine just returns. Otherwise, it will get to the corresponding
 * completion queue, process all the entries on that completion queue, rearm
 * the completion queue, and then return.
 **/
static void
lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
			uint32_t fcp_cqidx)
{
	struct lpfc_queue *cq;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	uint16_t cqid;
	int ecount = 0;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0366 Not a valid fast-path completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0367 Fast-path completion queue "
					"does not exist\n");
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

	/* Process all the entries to the CQ */
	while ((cqe = lpfc_sli4_cq_get(cq))) {
		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
		if (!(++ecount % LPFC_GET_QE_REL_INT))
			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
	}

	/* Catch the no cq entry condition */
	if (unlikely(ecount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0369 No entry from fast-path completion "
				"queue fcpcqid=%d\n", cq->queue_id);

	/* In any case, flush and re-arm the CQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_eq_flush - Flush and discard all entries on an event queue
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the event queue to flush.
 *
 * This routine walks and discards all the currently posted entries on the
 * event queue @eq, then clears and re-arms the queue.
 **/
static void
lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;

	/* walk all the EQ entries and drop on the floor */
	while ((eqe = lpfc_sli4_eq_get(eq)))
		;

	/* Clear and re-arm the EQ */
	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
}
/**
 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-4 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there are slow-path events in
 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
 * interrupt mode, this function is called as part of the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA is
 * undergoing initialization, the interrupt handler will not process the
 * interrupt. The link attention and ELS ring attention events are handled
 * by the worker thread. The interrupt handler signals the worker thread
 * and returns for these events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_queue *speq;
	struct lpfc_eqe *eqe;
	unsigned long iflag;
	int ecount = 0;

	/*
	 * Get the driver's phba structure from the dev_id
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	speq = phba->sli4_hba.sp_eq;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, speq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	/*
	 * Process all the event on FCP slow-path EQ
	 */
	while ((eqe = lpfc_sli4_eq_get(speq))) {
		lpfc_sli4_sp_handle_eqe(phba, eqe);
		if (!(++ecount % LPFC_GET_QE_REL_INT))
			lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
	}

	/* Always clear and re-arm the slow-path EQ */
	lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);

	/* Catch the no cq entry condition */
	if (unlikely(ecount == 0)) {
		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0357 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
} /* lpfc_sli4_sp_intr_handler */
/**
 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-4 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that
 * the FCP EQs and FCP CQs are one-to-one mapped, such that the FCP EQ
 * index is equal to that of the FCP CQ index.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
	struct lpfc_queue *fpeq;
	struct lpfc_eqe *eqe;
	unsigned long iflag;
	int ecount = 0;
	uint32_t fcp_eqidx;

	/* Get the driver's phba structure from the dev_id */
	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
	phba = fcp_eq_hdl->phba;
	fcp_eqidx = fcp_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	/*
	 * Process all the event on FCP fast-path EQ
	 */
	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
		lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
		if (!(++ecount % LPFC_GET_QE_REL_INT))
			lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
	}

	/* Always clear and re-arm the fast-path EQ */
	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);

	if (unlikely(ecount == 0)) {
		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
} /* lpfc_sli4_fp_intr_handler */
/**
 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler to device with SLI-4
 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
 * interrupt mode is enabled and there is an event in the HBA which requires
 * driver attention. This function invokes the slow-path interrupt attention
 * handling function and fast-path interrupt attention handling function in
 * turn to process the relevant HBA attention events. This function is called
 * without any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	bool fp_handled = false;
	uint32_t fcp_eqidx;

	/* Get the driver's phba structure from the dev_id */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */
	sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
					&phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
		if (fp_irq_rc == IRQ_HANDLED)
			fp_handled |= true;
	}

	return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc;
} /* lpfc_sli4_intr_handler */
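
/*
 * A minimal wiring sketch for the three handlers above, assuming MSI-X
 * vectors were already obtained from pci_enable_msix(); the msix_entries[]
 * layout and names are illustrative, not the driver's actual setup code:
 *
 *	rc = request_irq(msix_entries[0].vector, lpfc_sli4_sp_intr_handler,
 *			 IRQF_SHARED, "lpfc-sp", phba);
 *	for (idx = 0; idx < phba->cfg_fcp_eq_count; idx++)
 *		rc = request_irq(msix_entries[idx + 1].vector,
 *				 lpfc_sli4_fp_intr_handler, IRQF_SHARED,
 *				 "lpfc-fp", &phba->sli4_hba.fcp_eq_hdl[idx]);
 *
 * With MSI or INTx, everything instead routes through the device-level
 * handler:
 *
 *	rc = request_irq(pdev->irq, lpfc_sli4_intr_handler, IRQF_SHARED,
 *			 "lpfc", phba);
 */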
/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying
 * the queue on the HBA.
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	if (!queue)
		return;

	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	kfree(queue);
	return;
}
/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
		      uint32_t entry_count)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	int x, total_qe_count;
	void *dma_pointer;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	queue = kzalloc(sizeof(struct lpfc_queue) +
			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
	if (!queue)
		return NULL;
	queue->page_count = (ALIGN(entry_size * entry_count,
			hw_page_size))/hw_page_size;
	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  hw_page_size, &dmabuf->phys,
						  GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		memset(dmabuf->virt, 0, hw_page_size);
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* initialize queue's entry array */
		dma_pointer = dmabuf->virt;
		for (; total_qe_count < entry_count &&
		     dma_pointer < (hw_page_size + dmabuf->virt);
		     total_qe_count++, dma_pointer += entry_size) {
			queue->qe[total_qe_count].address = dma_pointer;
		}
	}
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->phba = phba;

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}
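
/*
 * A minimal allocation sketch (sizes illustrative): a queue of 4096
 * four-byte EQEs needs ALIGN(4 * 4096, 4096) / 4096 = 4 host pages, so
 * page_count comes out to 4 and four DMA-coherent pages are chained on
 * queue->page_list:
 *
 *	struct lpfc_queue *eq;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, 4, 4096);
 *	if (!eq)
 *		return -ENOMEM;
 */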
/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @eq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the EQ_CREATE mailbox command to the HBA to setup the
 * event queue. The mailbox command is issued in polled mode, so this
 * function waits for it to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
{
	struct lpfc_mbx_eq_create *eq_create;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	struct lpfc_dmabuf *dmabuf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_create = &mbox->u.mqe.un.eq_create;
	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
	       eq->page_count);
	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
	       LPFC_EQE_SIZE);
	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
	/* Calculate delay multiplier from maximum interrupt per second */
	dmult = LPFC_DMULT_CONST/imax - 1;
	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
	       dmult);
	switch (eq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0360 Unsupported EQ count. (%d)\n",
				eq->entry_count);
		if (eq->entry_count < 256)
			return -EINVAL;
		/* otherwise default to smallest count (drop through) */
	case 256:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_1024);
		break;
	case 2048:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_2048);
		break;
	case 4096:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_4096);
		break;
	}
	list_for_each_entry(dmabuf, &eq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->context1 = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2500 EQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	eq->type = LPFC_EQ;
	eq->subtype = LPFC_NONE;
	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
	if (eq->queue_id == 0xFFFF)
		status = -ENXIO;
	eq->host_index = 0;
	eq->hba_index = 0;

	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
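
/*
 * A minimal creation sketch following the allocation above, assuming the
 * interrupt rate limit comes from the cfg_fcp_imax module parameter
 * (error handling omitted):
 *
 *	rc = lpfc_eq_create(phba, eq, phba->cfg_fcp_imax);
 */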
/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 * @type: The queue's type.
 * @subtype: The queue's subtype.
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @eq
 * is used to indicate which event queue to bind this completion queue to.
 * This function will send the CQ_CREATE mailbox command to the HBA to setup
 * the completion queue. The mailbox command is issued in polled mode, so
 * this function waits for it to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
{
	struct lpfc_mbx_cq_create *cq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	cq_create = &mbox->u.mqe.un.cq_create;
	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
	       cq->page_count);
	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
	bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
	switch (cq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0361 Unsupported CQ count. (%d)\n",
				cq->entry_count);
		if (cq->entry_count < 256)
			return -EINVAL;
		/* otherwise default to smallest count (drop through) */
	case 256:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_1024);
		break;
	}
	list_for_each_entry(dmabuf, &cq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2501 CQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	if (cq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	/* link the cq onto the parent eq child list */
	list_add_tail(&cq->list, &eq->child_list);
	/* Set up completion queue's type and subtype */
	cq->type = type;
	cq->subtype = subtype;
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	cq->host_index = 0;
	cq->hba_index = 0;

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
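
/*
 * A minimal sketch of binding a new completion queue to an existing event
 * queue; this mirrors how the slow-path ELS CQ hangs off the slow-path EQ
 * (entry size/count numbers illustrative, error handling omitted):
 *
 *	cq = lpfc_sli4_queue_alloc(phba, 16, 1024);
 *	rc = lpfc_cq_create(phba, cq, phba->sli4_hba.sp_eq,
 *			    LPFC_WCQ, LPFC_ELS);
 */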
/**
 * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mailbox queue.
 *
 * This function provides failback (fb) functionality when the
 * mq_create_ext fails on older FW generations. Its purpose is identical
 * to mq_create_ext otherwise.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
static void
lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_dmabuf *dmabuf;
	int length;

	length = (sizeof(struct lpfc_mbx_mq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	mq_create = &mbox->u.mqe.un.mq_create;
	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
	       mq->page_count);
	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
	switch (mq->entry_count) {
	case 16:
		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
		       LPFC_MQ_CNT_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
		       LPFC_MQ_CNT_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
		       LPFC_MQ_CNT_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
		       LPFC_MQ_CNT_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
}
/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mailbox queue.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @mq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the MQ_CREATE mailbox command to the HBA to setup the
 * mailbox queue. The mailbox command is issued in polled mode, so this
 * function waits for it to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_mbx_mq_create_ext *mq_create_ext;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
			 length, LPFC_SLI4_MBX_EMBED);

	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
	bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request,
	       mq->page_count);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request,
	       1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
	switch (mq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0362 Unsupported MQ count. (%d)\n",
				mq->entry_count);
		if (mq->entry_count < 16)
			return -EINVAL;
		/* otherwise default to smallest count (drop through) */
	case 16:
		bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
		       LPFC_MQ_CNT_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
		       LPFC_MQ_CNT_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
		       LPFC_MQ_CNT_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
		       LPFC_MQ_CNT_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
			      &mq_create_ext->u.response);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2795 MQ_CREATE_EXT failed with "
				"status x%x. Failback to MQ_CREATE.\n",
				rc);
		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
		mq_create = &mbox->u.mqe.un.mq_create;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
				      &mq_create->u.response);
	}

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2502 MQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	if (mq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	mq->type = LPFC_MQ;
	mq->subtype = subtype;
	mq->host_index = 0;
	mq->hba_index = 0;

	/* link the mq onto the parent cq child list */
	list_add_tail(&mq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
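
/*
 * Design note: MQ_CREATE_EXT registers for the link, FCF-state, and
 * group-5 asynchronous events at create time. Firmware generations that
 * reject the extended opcode fail the first mailbox issue; the failback
 * path above then rewrites the very same mailbox as a plain MQ_CREATE via
 * lpfc_mq_create_fb_init() and reissues it, so no second mailbox
 * allocation is needed.
 */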
/**
 * lpfc_wq_create - Create a Work Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @wq: The queue structure to use to create the work queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a work queue, as detailed in @wq, on a port,
 * described by @phba by sending a WQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @wq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @cq
 * is used to indicate which completion queue to bind this work queue to.
 * This function will send the WQ_CREATE mailbox command to the HBA to setup
 * the work queue. The mailbox command is issued in polled mode, so this
 * function waits for it to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_wq_create *wq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	wq_create = &mbox->u.mqe.un.wq_create;
	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
	       wq->page_count);
	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
	       cq->queue_id);
	list_for_each_entry(dmabuf, &wq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2503 WQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
	if (wq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	wq->type = LPFC_WQ;
	wq->subtype = subtype;
	wq->host_index = 0;
	wq->hba_index = 0;

	/* link the wq onto the parent cq child list */
	list_add_tail(&wq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
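
/*
 * A minimal sketch of the bottom of the slow-path chain, assuming els_cq
 * was previously bound to the slow-path EQ with lpfc_cq_create() (error
 * handling omitted):
 *
 *	rc = lpfc_wq_create(phba, els_wq, els_cq, LPFC_ELS);
 *
 * WQEs posted to els_wq now complete through els_cq, whose CQEs are in
 * turn signaled through the parent EQ.
 */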
/**
 * lpfc_rq_create - Create a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The queue's subtype.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
 * to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
 * structs are used to get the entry count that is necessary to determine the
 * number of pages to use for this queue. The @cq is used to indicate which
 * completion queue to bind received buffers that are posted to these queues
 * to. This function will send the RQ_CREATE mailbox command to the HBA to
 * setup the receive queue pair. The mailbox command is issued in polled
 * mode, so this function waits for it to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_rq_create *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	rq_create = &mbox->u.mqe.un.rq_create;
	switch (hrq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2535 Unsupported RQ count. (%d)\n",
				hrq->entry_count);
		if (hrq->entry_count < 512)
			return -EINVAL;
		/* otherwise default to smallest count (drop through) */
	case 512:
		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
		       LPFC_RQ_RING_SIZE_512);
		break;
	case 1024:
		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
		       LPFC_RQ_RING_SIZE_1024);
		break;
	case 2048:
		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
		       LPFC_RQ_RING_SIZE_2048);
		break;
	case 4096:
		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
		       LPFC_RQ_RING_SIZE_4096);
		break;
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       hrq->page_count);
	bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
	       LPFC_HDR_BUF_SIZE);
	list_for_each_entry(dmabuf, &hrq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2504 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (hrq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	hrq->type = LPFC_HRQ;
	hrq->subtype = subtype;
	hrq->host_index = 0;
	hrq->hba_index = 0;

	/* now create the data queue */
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	switch (drq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2536 Unsupported RQ count. (%d)\n",
				drq->entry_count);
		if (drq->entry_count < 512)
			return -EINVAL;
		/* otherwise default to smallest count (drop through) */
	case 512:
		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
		       LPFC_RQ_RING_SIZE_512);
		break;
	case 1024:
		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
		       LPFC_RQ_RING_SIZE_1024);
		break;
	case 2048:
		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
		       LPFC_RQ_RING_SIZE_2048);
		break;
	case 4096:
		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
		       LPFC_RQ_RING_SIZE_4096);
		break;
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       drq->page_count);
	bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
	       LPFC_DATA_BUF_SIZE);
	list_for_each_entry(dmabuf, &drq->page_list, list) {
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		status = -ENXIO;
		goto out;
	}
	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (drq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	drq->type = LPFC_DRQ;
	drq->subtype = subtype;
	drq->host_index = 0;
	drq->hba_index = 0;

	/* link the header and data RQs onto the parent cq child list */
	list_add_tail(&hrq->list, &cq->child_list);
	list_add_tail(&drq->list, &cq->child_list);

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
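
/*
 * A minimal sketch of creating the unsolicited receive pair, assuming both
 * queues were allocated with the matching entry counts required above:
 *
 *	rc = lpfc_rq_create(phba, hdr_rq, dat_rq, els_cq, LPFC_USOL);
 *
 * Received frames then place the FC header into an hdr_rq buffer and the
 * payload into the corresponding dat_rq buffer.
 */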
/**
 * lpfc_eq_destroy - Destroy an Event Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (!eq)
		return -ENODEV;
	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
	       eq->queue_id);
	mbox->vport = eq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2505 EQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}

	/* Remove eq from any list */
	list_del_init(&eq->list);
	mempool_free(mbox, eq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (!cq)
		return -ENODEV;
	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
	       cq->queue_id);
	mbox->vport = cq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_create.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2506 CQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove cq from any list */
	list_del_init(&cq->list);
	mempool_free(mbox, cq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (!mq)
		return -ENODEV;
	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
	       mq->queue_id);
	mbox->vport = mq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2507 MQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove mq from any list */
	list_del_init(&mq->list);
	mempool_free(mbox, mq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (!wq)
		return -ENODEV;
	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
	       wq->queue_id);
	mbox->vport = wq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2508 WQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove wq from any list */
	list_del_init(&wq->list);
	mempool_free(mbox, wq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @hrq: The header receive queue of the pair to destroy.
 * @drq: The data receive queue of the pair to destroy.
 *
 * This function destroys the receive queue pair, as detailed in @hrq and
 * @drq, by sending a mailbox command, specific to the type of queue, to the
 * HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues to
 * destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct mbox_header));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}
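
/*
 * Teardown sketch: destruction runs the creation chain in reverse, so
 * child queues come off a CQ before the CQ itself goes, and the EQs go
 * last (queue names illustrative, error handling omitted):
 *
 *	lpfc_rq_destroy(phba, hdr_rq, dat_rq);
 *	lpfc_wq_destroy(phba, els_wq);
 *	lpfc_mq_destroy(phba, mbx_mq);
 *	lpfc_cq_destroy(phba, els_cq);
 *	lpfc_eq_destroy(phba, sp_eq);
 */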
/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: Pointer to HBA context object.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64-byte aligned.
 * If two SGL pages are mapped, the first must have 256 entries; the
 * second SGL can have between 1 and 256 entries.
 *
 * Return codes:
 *	0 - Success
 *	-ENXIO, -ENOMEM - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		   dma_addr_t pdma_phys_addr0,
		   dma_addr_t pdma_phys_addr1,
		   uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			 sizeof(struct lpfc_mbx_post_sgl_pages) -
			 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo	=
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo	=
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
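
/*
 * A minimal posting sketch, assuming fewer than 256 scatter gather
 * entries so the second page address is 0 as described above:
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 */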
/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If no unused xritag is
 * available it will return NO_XRI (0xffff); otherwise it returns the
 * allocated xritag. Zero is not a valid xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xritag;

	spin_lock_irq(&phba->hbalock);
	xritag = phba->sli4_hba.next_xri;
	if ((xritag != (uint16_t) -1) && xritag <
		(phba->sli4_hba.max_cfg_param.max_xri
			+ phba->sli4_hba.max_cfg_param.xri_base)) {
		phba->sli4_hba.next_xri++;
		phba->sli4_hba.max_cfg_param.xri_used++;
		spin_unlock_irq(&phba->hbalock);
		return xritag;
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"2004 Failed to allocate XRI.last XRITAG is %d"
			" Max XRI is %d, Used XRI is %d\n",
			phba->sli4_hba.next_xri,
			phba->sli4_hba.max_cfg_param.max_xri,
			phba->sli4_hba.max_cfg_param.xri_used);
	return NO_XRI;
}
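
/*
 * Illustrative caller sketch (hypothetical): an IO submission path would
 * reserve an xritag before building its WQE and must check for NO_XRI:
 *
 *	xritag = lpfc_sli4_next_xritag(phba);
 *	if (xritag == NO_XRI)
 *		(no XRI available; back off and retry later)
 */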
/**
 * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 **/
int
lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int els_xri_cnt, rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* The number of sgls to be posted */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2559 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2560 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);
	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
		sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
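
/*
 * Illustrative sketch (hypothetical): the SLI4 bring-up path would post
 * the ELS sgl list once, after lpfc_els_sgl_array has been populated and
 * before any ELS traffic starts:
 *
 *	rc = lpfc_sli4_post_sgl_list(phba);
 *	if (rc)
 *		(fail HBA initialization; no ELS XRIs are usable)
 */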
/**
 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
 * @phba: pointer to lpfc hba data structure.
 * @sblist: pointer to scsi buffer list.
 * @cnt: number of scsi buffers on the list.
 *
 * This routine is invoked to post a block of @cnt scsi sgl pages from a
 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
 * No Lock is held.
 **/
int
lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
			      int cnt)
{
	struct lpfc_scsi_buf *psb;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0217 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0283 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);
	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2561 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(psb, sblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = psb->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2564 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
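
/*
 * Illustrative caller sketch (hypothetical): the SCSI buffer allocator
 * would collect freshly allocated lpfc_scsi_buf entries on a local list
 * and post their sgl pages in one mailbox round trip:
 *
 *	LIST_HEAD(post_sblist);
 *	(add each new psb to post_sblist, counting them in num_posted)
 *	rc = lpfc_sli4_post_scsi_sgl_block(phba, &post_sblist, num_posted);
 */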
/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to lpfc_hba struct that the frame was received on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the fields in the @fc_hdr to see if the FC frame is a
 * valid type of frame that the LPFC driver will handle. This function will
 * return zero if the frame is a valid frame or a non-zero value when the
 * frame does not pass the check.
 **/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	char *rctl_names[] = FC_RCTL_NAMES_INIT;
	char *type_names[] = FC_TYPE_NAMES_INIT;
	struct fc_vft_header *fc_vft_hdr;

	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
	case FC_RCTL_DD_CMD_STATUS:	/* command status */
	case FC_RCTL_ELS_REQ:	/* extended link services request */
	case FC_RCTL_ELS_REP:	/* extended link services reply */
	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
	case FC_RCTL_BA_NOP:	/* basic link service NOP */
	case FC_RCTL_BA_ABTS:	/* basic link service abort */
	case FC_RCTL_BA_RMC:	/* remove connection */
	case FC_RCTL_BA_ACC:	/* basic accept */
	case FC_RCTL_BA_RJT:	/* basic reject */
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:	/* acknowledge_1 */
	case FC_RCTL_ACK_0:	/* acknowledge_0 */
	case FC_RCTL_P_RJT:	/* port reject */
	case FC_RCTL_F_RJT:	/* fabric reject */
	case FC_RCTL_P_BSY:	/* port busy */
	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
	case FC_RCTL_LCR:	/* link credit reset */
	case FC_RCTL_END:	/* end */
		break;
	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	default:
		goto drop;
	}
	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:%s type:%s\n",
			rctl_names[fc_hdr->fh_r_ctl],
			type_names[fc_hdr->fh_type]);
	return 0;
drop:
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:%s type:%s\n",
			rctl_names[fc_hdr->fh_r_ctl],
			type_names[fc_hdr->fh_type]);
	return 1;
}
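
/*
 * Worked example (illustrative): a frame tagged with a Virtual Fabric
 * Tagging Header arrives with fh_r_ctl == FC_RCTL_VFTH; the case above
 * steps past the VFT header to the encapsulated FC header and re-runs
 * the same check on it, so only the inner R_CTL/TYPE pair decides
 * whether the frame is kept or dropped.
 */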
/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. It returns the VFI if a VF header exists or 0
 * if no VF header is present.
 **/
static uint32_t
lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;

	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
		return 0;
	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
}
/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FC Fabric ID that the frame came from
 *
 * This function searches the @phba for a vport that matches the content of the
 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
 * returns the matching vport pointer or NULL if unable to match the frame to
 * a vport.
 **/
static struct lpfc_vport *
lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
		       uint16_t fcfi)
{
	struct lpfc_vport **vports;
	struct lpfc_vport *vport = NULL;
	int i;
	uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
			fc_hdr->fh_d_id[1] << 8 |
			fc_hdr->fh_d_id[2]);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			if (phba->fcf.fcfi == fcfi &&
			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
			    vports[i]->fc_myDID == did) {
				vport = vports[i];
				break;
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	return vport;
}
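
/*
 * Worked example (illustrative): a frame addressed to D_ID 0x010203
 * carries fh_d_id[] = {0x01, 0x02, 0x03}, so the shift/or above yields
 * did = (0x01 << 16) | (0x02 << 8) | 0x03 = 0x010203, which is compared
 * against each vport's fc_myDID.
 */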
/**
 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to indicate if any received sequences have
 * timed out.
 **/
static void
lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *dmabuf = NULL;

	/* get the oldest sequence on the rcv list */
	h_buf = list_get_first(&vport->rcv_buffer_list,
			       struct lpfc_dmabuf, list);
	if (!h_buf)
		return;
	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
}
/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function cleans up all outstanding received sequences. This is called
 * by the driver when a link event or user action invalidates all the received
 * sequences.
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}
/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
 * indicates that there is at least one timed out sequence this routine will
 * go through the received sequences one at a time from most inactive to most
 * active to determine which ones need to be cleaned up. Once it has determined
 * that a sequence needs to be cleaned up it will simply free up the resources
 * without sending an abort.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}
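
/*
 * Worked example (illustrative): with fc_edtov = 2000 (a common 2 second
 * E_D_TOV), a sequence stamped at time T is only reclaimed once jiffies
 * passes T + msecs_to_jiffies(2000); the scan stops at the first young
 * sequence because the rcv list is kept ordered from oldest to newest.
 */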
/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport on which the frame was received.
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's
 * rcv_buffer_list. This function returns a pointer to the first dmabuf in the
 * sequence list that the frame was linked to.
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* Use the hdr_buf to find the sequence that this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		temp_hdr = dmabuf->hbuf.virt;
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			return seq_dmabuf;
		}
	}
	return NULL;
}
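
/*
 * Worked example (illustrative): frames belong to the same sequence only
 * when the (fh_seq_id, fh_ox_id, fh_s_id) triple matches, so two
 * interleaved sequences from the same remote port with OX_IDs 0x10 and
 * 0x11 are kept on separate hbuf entries of the rcv_buffer_list.
 */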
/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described
 * by the information in the basic abort @dmabuf. It checks whether such a
 * partially assembled sequence is held by the driver. If so, it frees all
 * the frames from the partially assembled sequence.
 *
 * Return
 * true -- if there is a matching partially assembled sequence present and
 * all the frames are freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 * nothing got aborted in the lower layer driver
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}
/**
 * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort accept iocb command complete
 * event. It properly releases the memory allocated to the sequence abort
 * accept iocb.
 **/
static void
lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	if (cmd_iocbq)
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
}
/**
 * lpfc_sli4_seq_abort_acc - Accept sequence abort
 * @phba: Pointer to HBA context object.
 * @fc_hdr: pointer to a FC frame header.
 *
 * This function sends a basic accept to a previous unsolicited sequence abort
 * event after aborting the sequence handling.
 **/
static void
lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
			struct fc_frame_header *fc_hdr)
{
	struct lpfc_iocbq *ctiocb = NULL;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid, rxid;
	uint32_t sid, fctl;
	IOCB_t *icmd;

	if (!lpfc_is_link_up(phba))
		return;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr->fh_rx_id);

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
				"1268 Find ndlp returned NULL for oxid:x%x "
				"SID:x%x\n", oxid, sid);
		return;
	}

	/* Allocate buffer for acc iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb)
		return;

	/* Extract the F_CTL field from FC_HDR */
	fctl = sli4_fctl_from_fc_hdr(fc_hdr);

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.bdeSize = 0;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;

	/* Fill in the rest of iocb fields */
	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
	icmd->ulpBdeCount = 0;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	icmd->ulpContext = ndlp->nlp_rpi;

	ctiocb->iocb_cmpl = NULL;
	ctiocb->vport = phba->pport;
	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl;

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS sent by responder to CT exchange, construction
		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
		 * field and RX_ID from ABTS for RX_ID field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP);
		bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid);
		ctiocb->sli4_xritag = oxid;
	} else {
		/* ABTS sent by initiator to CT exchange, construction
		 * of BA_ACC will need to allocate a new XRI as for the
		 * XRI_TAG and RX_ID fields.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT);
		bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI);
		ctiocb->sli4_xritag = NO_XRI;
	}
	bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid);

	/* Xmit CT abts accept on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n",
			CMD_XMIT_BLS_RSP64_CX, phba->link_state);
	lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
}
/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it then marks the per-oxid status that the
 * unsolicited sequence has been aborted. After that, it will issue a basic
 * accept to accept the abort.
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool abts_par;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/*
		 * ABTS sent by responder to exchange, just free the buffer
		 */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
	} else {
		/*
		 * ABTS sent by initiator to exchange, need to do cleanup
		 */
		/* Try to abort partially assembled seq */
		abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);

		/* Send the sequence to ULP if the partial seq abort failed */
		if (abts_par == false)
			lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
		else
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
	}
	/* Send basic accept (BA_ACC) to the abort requester */
	lpfc_sli4_seq_abort_acc(phba, &fc_hdr);
}
/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three major things. 1) That the first
 * frame has a sequence count of zero. 2) There is a frame with the last frame
 * of sequence bit set. 3) That there are no holes in the sequence count. The
 * function will return 1 when the sequence is complete, otherwise it will
 * return 0.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure first frame of sequence has a sequence count of zero */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}
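
/*
 * Worked example (illustrative): a three-frame sequence is complete when
 * the dbuf list holds SEQ_CNT 0, 1, 2 and the last frame has
 * FC_FC_END_SEQ set in F_CTL; a list holding only SEQ_CNT 0 and 2 fails
 * the ++seq_count check above and keeps waiting for frame 1.
 */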
/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will be
 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it will throw out the received frames that were
 * not able to be described and return a pointer to the first iocbq. If unable
 * to allocate any iocbqs (including the first) this function will return NULL.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	struct ulp_bde64 *pbde;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
		first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
		first_iocbq->iocb.unsli3.rcvsli3.vpi =
					vport->vpi + vport->phba->vpi_base;
		/* put the first buffer into the first IOCBq */
		first_iocbq->context2 = &seq_dmabuf->dbuf;
		first_iocbq->context3 = NULL;
		first_iocbq->iocb.ulpBdeCount = 1;
		first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
							LPFC_DATA_BUF_SIZE;
		first_iocbq->iocb.un.rcvels.remoteID = sid;
		first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
				bf_get(lpfc_rcqe_length,
				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->context3) {
			iocbq->context3 = d_buf;
			iocbq->iocb.ulpBdeCount++;
			pbde = (struct ulp_bde64 *)
					&iocbq->iocb.unsli3.sli3Words[4];
			pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
				bf_get(lpfc_rcqe_length,
				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
		} else {
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				if (first_iocbq) {
					first_iocbq->iocb.ulpStatus =
							IOSTAT_FCP_RSP_ERROR;
					first_iocbq->iocb.un.ulpWord[4] =
							IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			iocbq->context2 = d_buf;
			iocbq->context3 = NULL;
			iocbq->iocb.ulpBdeCount = 1;
			iocbq->iocb.un.cont64[0].tus.f.bdeSize =
							LPFC_DATA_BUF_SIZE;
			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
				bf_get(lpfc_rcqe_length,
				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
			iocbq->iocb.un.rcvels.remoteID = sid;
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	return first_iocbq;
}
/**
 * lpfc_sli4_send_seq_to_ulp - Deliver a completed sequence to the ULP
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function prepares an iocbq list for the sequence and hands it to the
 * unsolicited ELS ring handler, releasing the iocbqs afterwards.
 **/
static void
lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
			  struct hbq_dmabuf *seq_dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
	struct lpfc_hba *phba = vport->phba;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
	if (!iocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2707 Ring %d handler: Failed to allocate "
				"iocb Rctl x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		return;
	}
	if (!lpfc_complete_unsol_iocb(phba,
				      &phba->sli.ring[LPFC_ELS_RING],
				      iocbq, fc_hdr->fh_r_ctl,
				      fc_hdr->fh_type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2540 Ring %d handler: unexpected Rctl "
				"x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);

	/* Free iocb created in lpfc_prep_seq */
	list_for_each_entry_safe(curr_iocb, next_iocb,
				 &iocbq->list, list) {
		list_del_init(&curr_iocb->list);
		lpfc_sli_release_iocbq(phba, curr_iocb);
	}
	lpfc_sli_release_iocbq(phba, iocbq);
}
/**
 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to the dmabuf that describes the received frame.
 *
 * This function is called with no lock held. This function processes all
 * the received buffers and gives them to the upper layers when a received
 * buffer indicates that it is the final frame in the sequence. The interrupt
 * service routine processes received buffers at interrupt contexts and adds
 * received dma buffers to the rb_pend_list queue and signals the worker
 * thread. The worker thread calls lpfc_sli4_handle_received_buffer, which
 * will call the appropriate receive function when the final frame in a
 * sequence is received.
 **/
void
lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
				 struct hbq_dmabuf *dmabuf)
{
	struct hbq_dmabuf *seq_dmabuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_vport *vport;
	uint32_t fcfi;

	/* Process each received buffer */
	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* check to see if this a valid type of frame */
	if (lpfc_fc_frame_check(phba, fc_hdr)) {
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
	if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
		/* throw out the frame */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* Handle the basic abort sequence (BA_ABTS) event */
	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
		return;
	}

	/* Link this frame */
	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
	if (!seq_dmabuf) {
		/* unable to add frame to vport - throw it out */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* If not last frame in sequence continue processing frames. */
	if (!lpfc_seq_complete(seq_dmabuf))
		return;

	/* Send the complete sequence to the upper layer protocol */
	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}
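
/*
 * Illustrative call flow (summarizing the comment above): the interrupt
 * handler queues each received hbq_dmabuf and wakes the worker thread,
 * which invokes lpfc_sli4_handle_received_buffer() once per buffer; only
 * the buffer that completes a sequence reaches
 * lpfc_sli4_send_seq_to_ulp().
 */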
/**
 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
 *
 * This routine does not require any locks. Its usage is expected
 * to be driver load or reset recovery when the driver is
 * sequential.
 *
 * Return codes
 *	0 - successful
 *	-EIO - The mailbox failed to complete successfully.
 *	When this error occurs, the driver is not guaranteed
 *	to have any rpi regions posted to the device and
 *	must either attempt to repost the regions or take a
 *	fatal error.
 **/
int
lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_page;
	uint32_t rc = 0;

	/* Post all rpi memory regions to the port. */
	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2008 Error %d posting all rpi "
					"headers\n", rc);
			rc = -EIO;
			break;
		}
	}
	return rc;
}
/**
 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 * @rpi_page: pointer to the rpi memory region.
 *
 * This routine is invoked to post a single rpi header to the
 * HBA consistent with the SLI-4 interface spec. This memory region
 * maps up to 64 rpi context regions.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
	uint32_t rc = 0;
	uint32_t mbox_tmo;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2001 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/* Post all rpi memory regions to the port. */
	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
			 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
	       hdr_tmpl, rpi_page->page_count);
	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
	       rpi_page->start_rpi);
	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2514 POST_RPI_HDR mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate an rpi from the range managed by
 * the driver. When the pool of remaining rpis runs low, it also posts
 * another rpi header page to the port to grow the pool.
 *
 * Returns
 *	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
 *	LPFC_RPI_ALLOC_ERROR if no rpis are available.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	int rpi;
	uint16_t max_rpi, rpi_base, rpi_limit;
	uint16_t rpi_remaining;
	struct lpfc_rpi_hdr *rpi_hdr;

	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
	rpi_limit = phba->sli4_hba.next_rpi;

	/*
	 * The valid rpi range is not guaranteed to be zero-based. Start
	 * the search at the rpi_base as reported by the port.
	 */
	spin_lock_irq(&phba->hbalock);
	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
	if (rpi >= rpi_limit || rpi < rpi_base)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * on available rpis max has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irq(&phba->hbalock);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now. Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
		phba->sli4_hba.rpi_count;
	spin_unlock_irq(&phba->hbalock);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}
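
/*
 * Illustrative caller sketch (hypothetical): a registration path would
 * reserve an rpi and release it with the matching free on any failure:
 *
 *	rpi = lpfc_sli4_alloc_rpi(phba);
 *	if (rpi == LPFC_RPI_ALLOC_ERROR)
 *		(no rpi available; fail the registration)
 *	...
 *	lpfc_sli4_free_rpi(phba, rpi);
 */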
/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: the rpi to release.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver. The caller is expected to
 * hold the hbalock.
 **/
void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
		phba->sli4_hba.rpi_count--;
		phba->sli4_hba.max_cfg_param.rpi_used--;
	}
}
/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: the rpi to release.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove the memory region that
 * provided rpi via a bitmask.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
}
/**
 * lpfc_sli4_resume_rpi - Resume an rpi on the port
 * @ndlp: pointer to the node whose rpi is to be resumed.
 *
 * This routine is invoked to issue a RESUME_RPI mailbox command to
 * reactivate the rpi associated with @ndlp on the port.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Post all rpi memory regions to the port. */
	lpfc_resume_rpi(mboxq, ndlp);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine is invoked to activate a vpi with the port.
 *
 * Returns:
 *	0 success
 *	-Evalue otherwise
 **/
int
lpfc_sli4_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(phba, mboxq, vport->vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "2022 INIT VPI Mailbox failed "
				 "status %d, mbxStatus x%x\n", rc,
				 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, vport->phba->mbox_mem_pool);

	return retval;
}
/**
 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: Pointer to mailbox object.
 *
 * This routine is the completion handler for the ADD_FCF_RECORD mailbox
 * command issued by lpfc_sli4_add_fcf_record. It checks the mailbox
 * subheader status and frees the nonembedded mailbox resources.
 **/
void
lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;

	virt_addr = mboxq->sge_array->addr[0];
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if ((shdr_status || shdr_add_status) &&
	    (shdr_status != STATUS_FCF_IN_USE))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2558 ADD_FCF_RECORD mailbox failed with "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);

	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
/**
 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the initialized fcf record to add.
 *
 * This routine is invoked to manually add a single FCF record. The caller
 * must pass a completely initialized FCF_Record. This routine takes
 * care of the nonembedded mailbox operations.
 **/
int
lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
{
	int rc = 0;
	LPFC_MBOXQ_t *mboxq;
	uint8_t *bytep;
	void *virt_addr;
	dma_addr_t phys_addr;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	uint32_t fcfindex;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
		return -ENOMEM;
	}

	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
		  sizeof(uint32_t);

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
				     req_len, LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2523 Allocated DMA memory size (x%x) is "
				"less than the requested DMA memory "
				"size (x%x)\n", alloc_len, req_len);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}

	/*
	 * Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	virt_addr = mboxq->sge_array->addr[0];
	/*
	 * Configure the FCF record for FCFI 0. This is the driver's
	 * hardcoded default and gets used in nonFIP mode.
	 */
	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));

	/*
	 * Copy the fcf_index and the FCF Record Data. The data starts after
	 * the FCoE header plus word10. The data copy needs to be endian
	 * correct.
	 */
	bytep += sizeof(uint32_t);
	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2515 ADD_FCF_RECORD mailbox failed with "
				"status 0x%x\n", rc);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		rc = -EIO;
	} else
		rc = 0;

	return rc;
}
/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record to write the default data.
 * @fcf_index: FCF table entry index.
 *
 * This routine is invoked to build the driver's default FCF record. The
 * values used are hardcoded. This routine handles memory initialization.
 **/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
	       LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}
/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * record and processing it one at a time starting from the @fcf_index
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
12361 * Return 0 if the mailbox command is submitted sucessfully, none 0
12365 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba
*phba
, uint16_t fcf_index
)
12368 LPFC_MBOXQ_t
*mboxq
;
12370 phba
->fcoe_eventtag_at_fcf_scan
= phba
->fcoe_eventtag
;
12371 mboxq
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
12373 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
12374 "2000 Failed to allocate mbox for "
12377 goto fail_fcf_scan
;
12379 /* Construct the read FCF record mailbox command */
12380 rc
= lpfc_sli4_mbx_read_fcf_rec(phba
, mboxq
, fcf_index
);
12383 goto fail_fcf_scan
;
12385 /* Issue the mailbox command asynchronously */
12386 mboxq
->vport
= phba
->pport
;
12387 mboxq
->mbox_cmpl
= lpfc_mbx_cmpl_fcf_scan_read_fcf_rec
;
12389 spin_lock_irq(&phba
->hbalock
);
12390 phba
->hba_flag
|= FCF_TS_INPROG
;
12391 spin_unlock_irq(&phba
->hbalock
);
12393 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_NOWAIT
);
12394 if (rc
== MBX_NOT_FINISHED
)
12397 /* Reset eligible FCF count for new scan */
12398 if (fcf_index
== LPFC_FCOE_FCF_GET_FIRST
)
12399 phba
->fcf
.eligible_fcf_cnt
= 0;
12405 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
12406 /* FCF scan failed, clear FCF_TS_INPROG flag */
12407 spin_lock_irq(&phba
->hbalock
);
12408 phba
->hba_flag
&= ~FCF_TS_INPROG
;
12409 spin_unlock_irq(&phba
->hbalock
);
/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index
 * and to use it for FLOGI roundrobin FCF failover.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2763 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}
/**
 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index to
 * determine whether it's eligible for the FLOGI roundrobin failover list.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2758 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}
/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get the next eligible FCF record index in a round
 * robin fashion. If no eligible FCF record index is found in the
 * roundrobin bmask, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) shall be returned;
 * otherwise, the next eligible FCF record's index shall be returned.
 **/
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

	/* Search start from next bit of currently registered FCF index */
	next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
					LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);

	/* Check roundrobin failover list empty condition */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");
		return LPFC_FCOE_FCF_NEXT_NONE;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);
	return next_fcf_index;
}
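
/*
 * Worked example (illustrative): with eligible indexes {2, 7} in
 * fcf_rr_bmask and the current FCF at index 7, the first find_next_bit()
 * starts at 8 and runs off the end of the table, so the wrap-around
 * search restarts at 0 and returns 2. Only an empty bmask yields
 * LPFC_FCOE_FCF_NEXT_NONE.
 */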
/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to set.
 *
 * This routine sets the FCF record index in to the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit is successfully set, otherwise it returns
 * -EINVAL.
 **/
*phba
, uint16_t fcf_index
)
12562 if (fcf_index
>= LPFC_SLI4_FCF_TBL_INDX_MAX
) {
12563 lpfc_printf_log(phba
, KERN_ERR
, LOG_FIP
,
12564 "2610 FCF (x%x) reached driver's book "
12565 "keeping dimension:x%x\n",
12566 fcf_index
, LPFC_SLI4_FCF_TBL_INDX_MAX
);
12569 /* Set the eligible FCF record index bmask */
12570 set_bit(fcf_index
, phba
->fcf
.fcf_rr_bmask
);
12572 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
12573 "2790 Set FCF (x%x) to roundrobin FCF failover "
12574 "bmask\n", fcf_index
);
/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to clear.
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
 **/
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2762 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return;
	}
	/* Clear the eligible FCF record index bmask */
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2791 Clear FCF (x%x) from roundrobin failover "
			"bmask\n", fcf_index);
}
/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: Pointer to mailbox object.
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. If the mailbox command returned failure, it will try to stop the
 * FCF rediscover wait timer.
 **/
12615 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mbox
)
12617 struct lpfc_mbx_redisc_fcf_tbl
*redisc_fcf
;
12618 uint32_t shdr_status
, shdr_add_status
;
12620 redisc_fcf
= &mbox
->u
.mqe
.un
.redisc_fcf_tbl
;
12622 shdr_status
= bf_get(lpfc_mbox_hdr_status
,
12623 &redisc_fcf
->header
.cfg_shdr
.response
);
12624 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
,
12625 &redisc_fcf
->header
.cfg_shdr
.response
);
12626 if (shdr_status
|| shdr_add_status
) {
12627 lpfc_printf_log(phba
, KERN_ERR
, LOG_FIP
,
12628 "2746 Requesting for FCF rediscovery failed "
12629 "status x%x add_status x%x\n",
12630 shdr_status
, shdr_add_status
);
12631 if (phba
->fcf
.fcf_flag
& FCF_ACVL_DISC
) {
12632 spin_lock_irq(&phba
->hbalock
);
12633 phba
->fcf
.fcf_flag
&= ~FCF_ACVL_DISC
;
12634 spin_unlock_irq(&phba
->hbalock
);
12636 * CVL event triggered FCF rediscover request failed,
12637 * last resort to re-try current registered FCF entry.
12639 lpfc_retry_pport_discovery(phba
);
12641 spin_lock_irq(&phba
->hbalock
);
12642 phba
->fcf
.fcf_flag
&= ~FCF_DEAD_DISC
;
12643 spin_unlock_irq(&phba
->hbalock
);
12645 * DEAD FCF event triggered FCF rediscover request
12646 * failed, last resort to fail over as a link down
12647 * to FCF registration.
12649 lpfc_sli4_fcf_dead_failthrough(phba
);
12652 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
12653 "2775 Start FCF rediscover quiescent timer\n");
12655 * Start FCF rediscovery wait timer for pending FCF
12656 * before rescan FCF record table.
12658 lpfc_fcf_redisc_wait_start_timer(phba
);
12661 mempool_free(mbox
, phba
->mbox_mem_pool
);

/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request rediscovery of the entire FCF table
 * by the port.
 **/
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	int rc, length;

	/* Cancel retry delay timers to all vports before FCF rediscover */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2745 Failed to allocate mbox for "
				"requesting FCF rediscover.\n");
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
			 length, LPFC_SLI4_MBX_EMBED);

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
	/* Set count to 0 for invalidating the entire FCF database */
	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);

	/* Issue the mailbox command asynchronously */
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
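
/*
 * Illustrative call-pattern sketch, not part of the driver source: the
 * request is asynchronous, so a caller only sees setup failures here;
 * the real outcome arrives in lpfc_mbx_cmpl_redisc_fcf_table(). The
 * fallback shown is an assumption, not the driver's actual policy.
 *
 *	rc = lpfc_sli4_redisc_fcf_table(phba);
 *	if (rc)
 *		// -ENOMEM/-EIO: the mailbox was never issued, so treat
 *		// it like a failed rediscovery.
 *		lpfc_sli4_fcf_dead_failthrough(phba);
 */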

/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine as a last resort to the FCF DEAD
 * event when the driver failed to perform fast FCF failover.
 **/
void
lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
{
	uint32_t link_state;

	/*
	 * Last resort as FCF DEAD event failover will treat this as
	 * a link down, but save the link state because we don't want
	 * it to be changed to Link Down unless it is already down.
	 */
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;

	/* Unregister FCF if no devices connected to it */
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLV for port status to
 * decide if the user disabled the port. If the TLV indicates the
 * port is disabled, the hba_flag is set accordingly.
 **/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint8_t *rgn23_data = NULL;
	uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 lpfc_sli_read_serdes_param failed to"
				" allocate mailbox memory\n");
		goto out;
	}
	mb = &pmb->u.mb;

	/* Get adapter Region 23 data */
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2601 lpfc_sli_read_link_ste failed to"
				" read config region 23 rc 0x%x Status 0x%x\n",
				rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	data_size = offset;
	offset = 0;

	if (!data_size)
		goto out;

	/* Check the region signature first */
	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2619 Config region 23 has bad signature\n");
		goto out;
	}
	offset += 4;

	/* Check the data structure version */
	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2620 Config region 23 has bad version\n");
		goto out;
	}
	offset += 4;

	/* Parse TLV entries in the region */
	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		/*
		 * If the TLV is not driver specific TLV or driver id is
		 * not linux driver id, skip the record.
		 */
		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
		    (rgn23_data[offset + 3] != 0)) {
			offset += rgn23_data[offset + 1] * 4 + 4;
			continue;
		}

		/* Driver found a driver specific TLV in the config region */
		sub_tlv_len = rgn23_data[offset + 1] * 4;
		offset += 4;
		tlv_offset = 0;

		/*
		 * Search for configured port state sub-TLV.
		 */
		while ((offset < data_size) &&
		       (tlv_offset < sub_tlv_len)) {
			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
				offset += 4;
				tlv_offset += 4;
				break;
			}
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				/* Advance both counters by the full record
				 * length before offset moves, so the length
				 * byte is only read once.
				 */
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
			}

			/* This HBA contains PORT_STE configured */
			if (!rgn23_data[offset + 2])
				phba->hba_flag |= LINK_DISABLED;

			goto out;
		}
	}

out:
	if (pmb)
		mempool_free(pmb, phba->mbox_mem_pool);
	kfree(rgn23_data);
}
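
/*
 * Region 23 record layout as assumed by the parser above (inferred from
 * the code, not from a separate spec): each TLV record is a type byte,
 * a length byte counted in 32-bit words, then the payload, so a record
 * spans rgn23_data[offset + 1] * 4 + 4 bytes in total.
 *
 *	rgn23_data[offset + 0]: type (DRIVER_SPECIFIC_TYPE, PORT_STE_TYPE, ...)
 *	rgn23_data[offset + 1]: payload length in words
 *	rgn23_data[offset + 2]: first payload byte; for PORT_STE_TYPE a
 *	                        zero here means the user disabled the port
 *	                        (LINK_DISABLED gets set)
 */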

/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
		    (mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
		    (mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
			/* Put reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Cleanup any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport ignore it.
			 */
			if ((mb->vport != vport) ||
			    (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			    (mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->context2;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			if (phba->sli_rev == LPFC_SLI_REV4)
				__lpfc_sli4_free_rpi(phba,
						mb->u.mb.un.varRegLogin.rpi);
			mp = (struct lpfc_dmabuf *)(mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp = (struct lpfc_nodelist *)mb->context2;
			mb->context2 = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}
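
/*
 * Illustrative context sketch, not part of the driver source: a Clear
 * Virtual Link handler would restart vport discovery roughly like this.
 * Only lpfc_cleanup_pending_mbox() is real; the re-registration helper
 * is a hypothetical stand-in for whatever the CVL path actually does.
 *
 *	// Drop stale REG_LOGIN/REG_VPI mailboxes tied to this vport.
 *	lpfc_cleanup_pending_mbox(vport);
 *	// Then restart discovery, e.g. by re-issuing REG_VPI.
 *	lpfc_reissue_reg_vpi(vport);	// hypothetical
 */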

/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/
uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (pring->txq_cnt > pring->txq_max)
		pring->txq_max = pring->txq_cnt;

	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (pring->txq_cnt) {
		spin_lock_irqsave(&phba->hbalock, iflags);

		sglq = __lpfc_sli_get_sglq(phba);
		if (!sglq) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			break;
		} else {
			piocbq = lpfc_sli_ringtx_get(phba, pring);
			if (!piocbq) {
				/* The txq_cnt out of sync. This should
				 * never happen
				 */
				sglq = __lpfc_clear_active_sglq(phba,
						sglq->sli4_xritag);
				spin_unlock_irqrestore(&phba->hbalock, iflags);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2823 txq empty and txq_cnt is %d\n ",
					pring->txq_cnt);
				break;
			}
		}

		/* The xri and iocb resources secured,
		 * attempt to issue request
		 */
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg, piocbq->iotag,
					piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return pring->txq_cnt;
}
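
/*
 * Illustrative usage sketch, not part of the driver source: a path that
 * has just freed SGLs can retry the deferred ELS traffic and inspect how
 * much is still queued. Only lpfc_drain_txq() is real here.
 *
 *	uint32_t still_queued = lpfc_drain_txq(phba);
 *	if (still_queued)
 *		// SGLs ran out again; the next SGL release retries.
 *		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 *				"txq still holds %u deferred IOCBs\n",
 *				still_queued);
 */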