1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
21 #include <linux/interrupt.h>
22 #include <linux/mempool.h>
23 #include <linux/pci.h>
25 #include <scsi/scsi.h>
26 #include <scsi/scsi_host.h>
27 #include <scsi/scsi_transport_fc.h>
28 #include <scsi/scsi_bsg_fc.h>
29 #include <scsi/fc/fc_fs.h>
34 #include "lpfc_sli4.h"
36 #include "lpfc_disc.h"
37 #include "lpfc_scsi.h"
39 #include "lpfc_logmsg.h"
40 #include "lpfc_crtn.h"
41 #include "lpfc_vport.h"
42 #include "lpfc_version.h"
45 * lpfc_bsg_rport_ct - send a CT command from a bsg request
46 * @job: fc_bsg_job to handle
49 lpfc_bsg_rport_ct(struct fc_bsg_job
*job
)
51 struct Scsi_Host
*shost
= job
->shost
;
52 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
53 struct lpfc_hba
*phba
= vport
->phba
;
54 struct lpfc_rport_data
*rdata
= job
->rport
->dd_data
;
55 struct lpfc_nodelist
*ndlp
= rdata
->pnode
;
56 struct ulp_bde64
*bpl
= NULL
;
58 struct lpfc_iocbq
*cmdiocbq
= NULL
;
59 struct lpfc_iocbq
*rspiocbq
= NULL
;
62 struct lpfc_dmabuf
*bmp
= NULL
;
65 struct scatterlist
*sgel
= NULL
;
70 /* in case no data is transferred */
71 job
->reply
->reply_payload_rcv_len
= 0;
73 if (!lpfc_nlp_get(ndlp
)) {
74 job
->reply
->result
= -ENODEV
;
78 if (ndlp
->nlp_flag
& NLP_ELS_SND_MASK
) {
83 spin_lock_irq(shost
->host_lock
);
84 cmdiocbq
= lpfc_sli_get_iocbq(phba
);
87 spin_unlock_irq(shost
->host_lock
);
90 cmd
= &cmdiocbq
->iocb
;
92 rspiocbq
= lpfc_sli_get_iocbq(phba
);
97 spin_unlock_irq(shost
->host_lock
);
99 rsp
= &rspiocbq
->iocb
;
101 bmp
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
104 spin_lock_irq(shost
->host_lock
);
108 spin_lock_irq(shost
->host_lock
);
109 bmp
->virt
= lpfc_mbuf_alloc(phba
, 0, &bmp
->phys
);
114 spin_unlock_irq(shost
->host_lock
);
116 INIT_LIST_HEAD(&bmp
->list
);
117 bpl
= (struct ulp_bde64
*) bmp
->virt
;
119 request_nseg
= pci_map_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
120 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
121 for_each_sg(job
->request_payload
.sg_list
, sgel
, request_nseg
, numbde
) {
122 busaddr
= sg_dma_address(sgel
);
123 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
124 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
125 bpl
->tus
.w
= cpu_to_le32(bpl
->tus
.w
);
126 bpl
->addrLow
= cpu_to_le32(putPaddrLow(busaddr
));
127 bpl
->addrHigh
= cpu_to_le32(putPaddrHigh(busaddr
));
131 reply_nseg
= pci_map_sg(phba
->pcidev
, job
->reply_payload
.sg_list
,
132 job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
133 for_each_sg(job
->reply_payload
.sg_list
, sgel
, reply_nseg
, numbde
) {
134 busaddr
= sg_dma_address(sgel
);
135 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64I
;
136 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
137 bpl
->tus
.w
= cpu_to_le32(bpl
->tus
.w
);
138 bpl
->addrLow
= cpu_to_le32(putPaddrLow(busaddr
));
139 bpl
->addrHigh
= cpu_to_le32(putPaddrHigh(busaddr
));
143 cmd
->un
.genreq64
.bdl
.ulpIoTag32
= 0;
144 cmd
->un
.genreq64
.bdl
.addrHigh
= putPaddrHigh(bmp
->phys
);
145 cmd
->un
.genreq64
.bdl
.addrLow
= putPaddrLow(bmp
->phys
);
146 cmd
->un
.genreq64
.bdl
.bdeFlags
= BUFF_TYPE_BLP_64
;
147 cmd
->un
.genreq64
.bdl
.bdeSize
=
148 (request_nseg
+ reply_nseg
) * sizeof(struct ulp_bde64
);
149 cmd
->ulpCommand
= CMD_GEN_REQUEST64_CR
;
150 cmd
->un
.genreq64
.w5
.hcsw
.Fctl
= (SI
| LA
);
151 cmd
->un
.genreq64
.w5
.hcsw
.Dfctl
= 0;
152 cmd
->un
.genreq64
.w5
.hcsw
.Rctl
= FC_RCTL_DD_UNSOL_CTL
;
153 cmd
->un
.genreq64
.w5
.hcsw
.Type
= FC_TYPE_CT
;
154 cmd
->ulpBdeCount
= 1;
156 cmd
->ulpClass
= CLASS3
;
157 cmd
->ulpContext
= ndlp
->nlp_rpi
;
158 cmd
->ulpOwner
= OWN_CHIP
;
159 cmdiocbq
->vport
= phba
->pport
;
160 cmdiocbq
->context1
= NULL
;
161 cmdiocbq
->context2
= NULL
;
162 cmdiocbq
->iocb_flag
|= LPFC_IO_LIBDFC
;
164 timeout
= phba
->fc_ratov
* 2;
165 job
->dd_data
= cmdiocbq
;
167 rc
= lpfc_sli_issue_iocb_wait(phba
, LPFC_ELS_RING
, cmdiocbq
, rspiocbq
,
168 timeout
+ LPFC_DRVR_TIMEOUT
);
170 if (rc
!= IOCB_TIMEDOUT
) {
171 pci_unmap_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
172 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
173 pci_unmap_sg(phba
->pcidev
, job
->reply_payload
.sg_list
,
174 job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
177 if (rc
== IOCB_TIMEDOUT
) {
178 lpfc_sli_release_iocbq(phba
, rspiocbq
);
183 if (rc
!= IOCB_SUCCESS
) {
188 if (rsp
->ulpStatus
) {
189 if (rsp
->ulpStatus
== IOSTAT_LOCAL_REJECT
) {
190 switch (rsp
->un
.ulpWord
[4] & 0xff) {
191 case IOERR_SEQUENCE_TIMEOUT
:
194 case IOERR_INVALID_RPI
:
204 job
->reply
->reply_payload_rcv_len
=
205 rsp
->un
.genreq64
.bdl
.bdeSize
;
208 spin_lock_irq(shost
->host_lock
);
209 lpfc_mbuf_free(phba
, bmp
->virt
, bmp
->phys
);
213 lpfc_sli_release_iocbq(phba
, rspiocbq
);
215 lpfc_sli_release_iocbq(phba
, cmdiocbq
);
216 spin_unlock_irq(shost
->host_lock
);
220 /* make error code available to userspace */
221 job
->reply
->result
= rc
;
222 /* complete the job back to userspace */
229 * lpfc_bsg_rport_els - send an ELS command from a bsg request
230 * @job: fc_bsg_job to handle
233 lpfc_bsg_rport_els(struct fc_bsg_job
*job
)
235 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
236 struct lpfc_hba
*phba
= vport
->phba
;
237 struct lpfc_rport_data
*rdata
= job
->rport
->dd_data
;
238 struct lpfc_nodelist
*ndlp
= rdata
->pnode
;
243 struct lpfc_iocbq
*rspiocbq
;
244 struct lpfc_iocbq
*cmdiocbq
;
247 struct lpfc_dmabuf
*pcmd
;
248 struct lpfc_dmabuf
*prsp
;
249 struct lpfc_dmabuf
*pbuflist
= NULL
;
250 struct ulp_bde64
*bpl
;
254 struct scatterlist
*sgel
= NULL
;
259 /* in case no data is transferred */
260 job
->reply
->reply_payload_rcv_len
= 0;
262 if (!lpfc_nlp_get(ndlp
)) {
267 elscmd
= job
->request
->rqst_data
.r_els
.els_code
;
268 cmdsize
= job
->request_payload
.payload_len
;
269 rspsize
= job
->reply_payload
.payload_len
;
270 rspiocbq
= lpfc_sli_get_iocbq(phba
);
277 rsp
= &rspiocbq
->iocb
;
280 cmdiocbq
= lpfc_prep_els_iocb(phba
->pport
, 1, cmdsize
, 0, ndlp
,
281 ndlp
->nlp_DID
, elscmd
);
284 lpfc_sli_release_iocbq(phba
, rspiocbq
);
288 job
->dd_data
= cmdiocbq
;
289 pcmd
= (struct lpfc_dmabuf
*) cmdiocbq
->context2
;
290 prsp
= (struct lpfc_dmabuf
*) pcmd
->list
.next
;
292 lpfc_mbuf_free(phba
, pcmd
->virt
, pcmd
->phys
);
294 lpfc_mbuf_free(phba
, prsp
->virt
, prsp
->phys
);
296 cmdiocbq
->context2
= NULL
;
298 pbuflist
= (struct lpfc_dmabuf
*) cmdiocbq
->context3
;
299 bpl
= (struct ulp_bde64
*) pbuflist
->virt
;
301 request_nseg
= pci_map_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
302 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
304 for_each_sg(job
->request_payload
.sg_list
, sgel
, request_nseg
, numbde
) {
305 busaddr
= sg_dma_address(sgel
);
306 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
307 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
308 bpl
->tus
.w
= cpu_to_le32(bpl
->tus
.w
);
309 bpl
->addrLow
= cpu_to_le32(putPaddrLow(busaddr
));
310 bpl
->addrHigh
= cpu_to_le32(putPaddrHigh(busaddr
));
314 reply_nseg
= pci_map_sg(phba
->pcidev
, job
->reply_payload
.sg_list
,
315 job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
316 for_each_sg(job
->reply_payload
.sg_list
, sgel
, reply_nseg
, numbde
) {
317 busaddr
= sg_dma_address(sgel
);
318 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64I
;
319 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
320 bpl
->tus
.w
= cpu_to_le32(bpl
->tus
.w
);
321 bpl
->addrLow
= cpu_to_le32(putPaddrLow(busaddr
));
322 bpl
->addrHigh
= cpu_to_le32(putPaddrHigh(busaddr
));
326 cmdiocbq
->iocb
.un
.elsreq64
.bdl
.bdeSize
=
327 (request_nseg
+ reply_nseg
) * sizeof(struct ulp_bde64
);
328 cmdiocbq
->iocb
.ulpContext
= rpi
;
329 cmdiocbq
->iocb_flag
|= LPFC_IO_LIBDFC
;
330 cmdiocbq
->context1
= NULL
;
331 cmdiocbq
->context2
= NULL
;
333 iocb_status
= lpfc_sli_issue_iocb_wait(phba
, LPFC_ELS_RING
, cmdiocbq
,
334 rspiocbq
, (phba
->fc_ratov
* 2)
335 + LPFC_DRVR_TIMEOUT
);
337 /* release the new ndlp once the iocb completes */
339 if (iocb_status
!= IOCB_TIMEDOUT
) {
340 pci_unmap_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
341 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
342 pci_unmap_sg(phba
->pcidev
, job
->reply_payload
.sg_list
,
343 job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
346 if (iocb_status
== IOCB_SUCCESS
) {
347 if (rsp
->ulpStatus
== IOSTAT_SUCCESS
) {
348 job
->reply
->reply_payload_rcv_len
=
349 rsp
->un
.elsreq64
.bdl
.bdeSize
;
351 } else if (rsp
->ulpStatus
== IOSTAT_LS_RJT
) {
352 struct fc_bsg_ctels_reply
*els_reply
;
353 /* LS_RJT data returned in word 4 */
354 uint8_t *rjt_data
= (uint8_t *)&rsp
->un
.ulpWord
[4];
356 els_reply
= &job
->reply
->reply_data
.ctels_reply
;
357 job
->reply
->result
= 0;
358 els_reply
->status
= FC_CTELS_STATUS_REJECT
;
359 els_reply
->rjt_data
.action
= rjt_data
[0];
360 els_reply
->rjt_data
.reason_code
= rjt_data
[1];
361 els_reply
->rjt_data
.reason_explanation
= rjt_data
[2];
362 els_reply
->rjt_data
.vendor_unique
= rjt_data
[3];
368 if (iocb_status
!= IOCB_TIMEDOUT
)
369 lpfc_els_free_iocb(phba
, cmdiocbq
);
371 lpfc_sli_release_iocbq(phba
, rspiocbq
);
374 /* make error code available to userspace */
375 job
->reply
->result
= rc
;
376 /* complete the job back to userspace */
382 struct lpfc_ct_event
{
383 struct list_head node
;
385 wait_queue_head_t wq
;
387 /* Event type and waiter identifiers */
392 /* next two flags are here for the auto-delete logic */
393 unsigned long wait_time_stamp
;
396 /* seen and not seen events */
397 struct list_head events_to_get
;
398 struct list_head events_to_see
;
402 struct list_head node
;
409 static struct lpfc_ct_event
*
410 lpfc_ct_event_new(int ev_reg_id
, uint32_t ev_req_id
)
412 struct lpfc_ct_event
*evt
= kzalloc(sizeof(*evt
), GFP_KERNEL
);
416 INIT_LIST_HEAD(&evt
->events_to_get
);
417 INIT_LIST_HEAD(&evt
->events_to_see
);
418 evt
->req_id
= ev_req_id
;
419 evt
->reg_id
= ev_reg_id
;
420 evt
->wait_time_stamp
= jiffies
;
421 init_waitqueue_head(&evt
->wq
);
427 lpfc_ct_event_free(struct lpfc_ct_event
*evt
)
429 struct event_data
*ed
;
431 list_del(&evt
->node
);
433 while (!list_empty(&evt
->events_to_get
)) {
434 ed
= list_entry(evt
->events_to_get
.next
, typeof(*ed
), node
);
440 while (!list_empty(&evt
->events_to_see
)) {
441 ed
= list_entry(evt
->events_to_see
.next
, typeof(*ed
), node
);
451 lpfc_ct_event_ref(struct lpfc_ct_event
*evt
)
457 lpfc_ct_event_unref(struct lpfc_ct_event
*evt
)
460 lpfc_ct_event_free(evt
);
/* CT FsType reserved for Emulex internal loopback traffic. */
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
        ELX_LOOPBACK_XRI_SETUP,
        /* NOTE(review): further values were dropped by the extraction;
         * the unsol-event switch's default arm implies at least one more
         * command (likely ELX_LOOPBACK_DATA) — confirm. */
        ELX_LOOPBACK_DATA,
};
471 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
476 * This function is called when an unsolicited CT command is received. It
477 * forwards the event to any processes registerd to receive CT events.
480 lpfc_bsg_ct_unsol_event(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
,
481 struct lpfc_iocbq
*piocbq
)
483 uint32_t evt_req_id
= 0;
486 struct lpfc_dmabuf
*dmabuf
= NULL
;
487 struct lpfc_ct_event
*evt
;
488 struct event_data
*evt_dat
= NULL
;
489 struct lpfc_iocbq
*iocbq
;
491 struct list_head head
;
492 struct ulp_bde64
*bde
;
495 struct lpfc_dmabuf
*bdeBuf1
= piocbq
->context2
;
496 struct lpfc_dmabuf
*bdeBuf2
= piocbq
->context3
;
497 struct lpfc_hbq_entry
*hbqe
;
498 struct lpfc_sli_ct_request
*ct_req
;
500 INIT_LIST_HEAD(&head
);
501 list_add_tail(&head
, &piocbq
->list
);
503 if (piocbq
->iocb
.ulpBdeCount
== 0 ||
504 piocbq
->iocb
.un
.cont64
[0].tus
.f
.bdeSize
== 0)
505 goto error_ct_unsol_exit
;
507 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
)
510 dma_addr
= getPaddr(piocbq
->iocb
.un
.cont64
[0].addrHigh
,
511 piocbq
->iocb
.un
.cont64
[0].addrLow
);
512 dmabuf
= lpfc_sli_ringpostbuf_get(phba
, pring
, dma_addr
);
515 ct_req
= (struct lpfc_sli_ct_request
*)dmabuf
->virt
;
516 evt_req_id
= ct_req
->FsType
;
517 cmd
= ct_req
->CommandResponse
.bits
.CmdRsp
;
518 len
= ct_req
->CommandResponse
.bits
.Size
;
519 if (!(phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
))
520 lpfc_sli_ringpostbuf_put(phba
, pring
, dmabuf
);
522 mutex_lock(&phba
->ct_event_mutex
);
523 list_for_each_entry(evt
, &phba
->ct_ev_waiters
, node
) {
524 if (evt
->req_id
!= evt_req_id
)
527 lpfc_ct_event_ref(evt
);
529 evt_dat
= kzalloc(sizeof(*evt_dat
), GFP_KERNEL
);
531 lpfc_ct_event_unref(evt
);
532 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
533 "2614 Memory allocation failed for "
538 mutex_unlock(&phba
->ct_event_mutex
);
540 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
541 /* take accumulated byte count from the last iocbq */
542 iocbq
= list_entry(head
.prev
, typeof(*iocbq
), list
);
543 evt_dat
->len
= iocbq
->iocb
.unsli3
.rcvsli3
.acc_len
;
545 list_for_each_entry(iocbq
, &head
, list
) {
546 for (i
= 0; i
< iocbq
->iocb
.ulpBdeCount
; i
++)
548 iocbq
->iocb
.un
.cont64
[i
].tus
.f
.bdeSize
;
552 evt_dat
->data
= kzalloc(evt_dat
->len
, GFP_KERNEL
);
553 if (!evt_dat
->data
) {
554 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
555 "2615 Memory allocation failed for "
556 "CT event data, size %d\n",
559 mutex_lock(&phba
->ct_event_mutex
);
560 lpfc_ct_event_unref(evt
);
561 mutex_unlock(&phba
->ct_event_mutex
);
562 goto error_ct_unsol_exit
;
565 list_for_each_entry(iocbq
, &head
, list
) {
566 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
567 bdeBuf1
= iocbq
->context2
;
568 bdeBuf2
= iocbq
->context3
;
570 for (i
= 0; i
< iocbq
->iocb
.ulpBdeCount
; i
++) {
572 if (phba
->sli3_options
&
573 LPFC_SLI3_HBQ_ENABLED
) {
575 hbqe
= (struct lpfc_hbq_entry
*)
576 &iocbq
->iocb
.un
.ulpWord
[0];
577 size
= hbqe
->bde
.tus
.f
.bdeSize
;
580 hbqe
= (struct lpfc_hbq_entry
*)
583 size
= hbqe
->bde
.tus
.f
.bdeSize
;
586 if ((offset
+ size
) > evt_dat
->len
)
587 size
= evt_dat
->len
- offset
;
589 size
= iocbq
->iocb
.un
.cont64
[i
].
591 bde
= &iocbq
->iocb
.un
.cont64
[i
];
592 dma_addr
= getPaddr(bde
->addrHigh
,
594 dmabuf
= lpfc_sli_ringpostbuf_get(phba
,
598 lpfc_printf_log(phba
, KERN_ERR
,
599 LOG_LIBDFC
, "2616 No dmabuf "
600 "found for iocbq 0x%p\n",
602 kfree(evt_dat
->data
);
604 mutex_lock(&phba
->ct_event_mutex
);
605 lpfc_ct_event_unref(evt
);
606 mutex_unlock(&phba
->ct_event_mutex
);
607 goto error_ct_unsol_exit
;
609 memcpy((char *)(evt_dat
->data
) + offset
,
612 if (evt_req_id
!= SLI_CT_ELX_LOOPBACK
&&
613 !(phba
->sli3_options
&
614 LPFC_SLI3_HBQ_ENABLED
)) {
615 lpfc_sli_ringpostbuf_put(phba
, pring
,
619 case ELX_LOOPBACK_XRI_SETUP
:
620 if (!(phba
->sli3_options
&
621 LPFC_SLI3_HBQ_ENABLED
))
622 lpfc_post_buffer(phba
,
626 lpfc_in_buf_free(phba
,
630 if (!(phba
->sli3_options
&
631 LPFC_SLI3_HBQ_ENABLED
))
632 lpfc_post_buffer(phba
,
641 mutex_lock(&phba
->ct_event_mutex
);
642 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
643 evt_dat
->immed_dat
= phba
->ctx_idx
;
644 phba
->ctx_idx
= (phba
->ctx_idx
+ 1) % 64;
645 phba
->ct_ctx
[evt_dat
->immed_dat
].oxid
=
646 piocbq
->iocb
.ulpContext
;
647 phba
->ct_ctx
[evt_dat
->immed_dat
].SID
=
648 piocbq
->iocb
.un
.rcvels
.remoteID
;
650 evt_dat
->immed_dat
= piocbq
->iocb
.ulpContext
;
652 evt_dat
->type
= FC_REG_CT_EVENT
;
653 list_add(&evt_dat
->node
, &evt
->events_to_see
);
654 wake_up_interruptible(&evt
->wq
);
655 lpfc_ct_event_unref(evt
);
656 if (evt_req_id
== SLI_CT_ELX_LOOPBACK
)
659 mutex_unlock(&phba
->ct_event_mutex
);
662 if (!list_empty(&head
))
669 * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command
670 * @job: SET_EVENT fc_bsg_job
673 lpfc_bsg_set_event(struct fc_bsg_job
*job
)
675 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
676 struct lpfc_hba
*phba
= vport
->phba
;
677 struct set_ct_event
*event_req
;
678 struct lpfc_ct_event
*evt
;
681 if (job
->request_len
<
682 sizeof(struct fc_bsg_request
) + sizeof(struct set_ct_event
)) {
683 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
684 "2612 Received SET_CT_EVENT below minimum "
689 event_req
= (struct set_ct_event
*)
690 job
->request
->rqst_data
.h_vendor
.vendor_cmd
;
692 mutex_lock(&phba
->ct_event_mutex
);
693 list_for_each_entry(evt
, &phba
->ct_ev_waiters
, node
) {
694 if (evt
->reg_id
== event_req
->ev_reg_id
) {
695 lpfc_ct_event_ref(evt
);
696 evt
->wait_time_stamp
= jiffies
;
700 mutex_unlock(&phba
->ct_event_mutex
);
702 if (&evt
->node
== &phba
->ct_ev_waiters
) {
703 /* no event waiting struct yet - first call */
704 evt
= lpfc_ct_event_new(event_req
->ev_reg_id
,
705 event_req
->ev_req_id
);
707 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
708 "2617 Failed allocation of event "
713 mutex_lock(&phba
->ct_event_mutex
);
714 list_add(&evt
->node
, &phba
->ct_ev_waiters
);
715 lpfc_ct_event_ref(evt
);
716 mutex_unlock(&phba
->ct_event_mutex
);
720 if (wait_event_interruptible(evt
->wq
,
721 !list_empty(&evt
->events_to_see
))) {
722 mutex_lock(&phba
->ct_event_mutex
);
723 lpfc_ct_event_unref(evt
); /* release ref */
724 lpfc_ct_event_unref(evt
); /* delete */
725 mutex_unlock(&phba
->ct_event_mutex
);
730 evt
->wait_time_stamp
= jiffies
;
733 mutex_lock(&phba
->ct_event_mutex
);
734 list_move(evt
->events_to_see
.prev
, &evt
->events_to_get
);
735 lpfc_ct_event_unref(evt
); /* release ref */
736 mutex_unlock(&phba
->ct_event_mutex
);
739 /* set_event carries no reply payload */
740 job
->reply
->reply_payload_rcv_len
= 0;
741 /* make error code available to userspace */
742 job
->reply
->result
= rc
;
743 /* complete the job back to userspace */
750 * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command
751 * @job: GET_EVENT fc_bsg_job
754 lpfc_bsg_get_event(struct fc_bsg_job
*job
)
756 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
757 struct lpfc_hba
*phba
= vport
->phba
;
758 struct get_ct_event
*event_req
;
759 struct get_ct_event_reply
*event_reply
;
760 struct lpfc_ct_event
*evt
;
761 struct event_data
*evt_dat
= NULL
;
764 if (job
->request_len
<
765 sizeof(struct fc_bsg_request
) + sizeof(struct get_ct_event
)) {
766 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
767 "2613 Received GET_CT_EVENT request below "
772 event_req
= (struct get_ct_event
*)
773 job
->request
->rqst_data
.h_vendor
.vendor_cmd
;
775 event_reply
= (struct get_ct_event_reply
*)
776 job
->reply
->reply_data
.vendor_reply
.vendor_rsp
;
778 mutex_lock(&phba
->ct_event_mutex
);
779 list_for_each_entry(evt
, &phba
->ct_ev_waiters
, node
) {
780 if (evt
->reg_id
== event_req
->ev_reg_id
) {
781 if (list_empty(&evt
->events_to_get
))
783 lpfc_ct_event_ref(evt
);
784 evt
->wait_time_stamp
= jiffies
;
785 evt_dat
= list_entry(evt
->events_to_get
.prev
,
786 struct event_data
, node
);
787 list_del(&evt_dat
->node
);
791 mutex_unlock(&phba
->ct_event_mutex
);
794 job
->reply
->reply_payload_rcv_len
= 0;
796 goto error_get_event_exit
;
799 if (evt_dat
->len
> job
->reply_payload
.payload_len
) {
800 evt_dat
->len
= job
->reply_payload
.payload_len
;
801 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
802 "2618 Truncated event data at %d "
804 job
->reply_payload
.payload_len
);
807 event_reply
->immed_data
= evt_dat
->immed_dat
;
809 if (evt_dat
->len
> 0)
810 job
->reply
->reply_payload_rcv_len
=
811 sg_copy_from_buffer(job
->reply_payload
.sg_list
,
812 job
->reply_payload
.sg_cnt
,
813 evt_dat
->data
, evt_dat
->len
);
815 job
->reply
->reply_payload_rcv_len
= 0;
819 kfree(evt_dat
->data
);
821 mutex_lock(&phba
->ct_event_mutex
);
822 lpfc_ct_event_unref(evt
);
823 mutex_unlock(&phba
->ct_event_mutex
);
825 error_get_event_exit
:
826 /* make error code available to userspace */
827 job
->reply
->result
= rc
;
828 /* complete the job back to userspace */
835 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
836 * @job: fc_bsg_job to handle
839 lpfc_bsg_hst_vendor(struct fc_bsg_job
*job
)
841 int command
= job
->request
->rqst_data
.h_vendor
.vendor_cmd
[0];
844 case LPFC_BSG_VENDOR_SET_CT_EVENT
:
845 return lpfc_bsg_set_event(job
);
848 case LPFC_BSG_VENDOR_GET_CT_EVENT
:
849 return lpfc_bsg_get_event(job
);
858 * lpfc_bsg_request - handle a bsg request from the FC transport
859 * @job: fc_bsg_job to handle
862 lpfc_bsg_request(struct fc_bsg_job
*job
)
867 msgcode
= job
->request
->msgcode
;
870 case FC_BSG_HST_VENDOR
:
871 rc
= lpfc_bsg_hst_vendor(job
);
874 rc
= lpfc_bsg_rport_els(job
);
877 rc
= lpfc_bsg_rport_ct(job
);
887 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
888 * @job: fc_bsg_job that has timed out
890 * This function just aborts the job's IOCB. The aborted IOCB will return to
891 * the waiting function which will handle passing the error back to userspace
894 lpfc_bsg_timeout(struct fc_bsg_job
*job
)
896 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
897 struct lpfc_hba
*phba
= vport
->phba
;
898 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*)job
->dd_data
;
899 struct lpfc_sli_ring
*pring
= &phba
->sli
.ring
[LPFC_ELS_RING
];
902 lpfc_sli_issue_abort_iotag(phba
, pring
, cmdiocb
);