1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
21 #include <linux/interrupt.h>
22 #include <linux/mempool.h>
23 #include <linux/pci.h>
25 #include <scsi/scsi.h>
26 #include <scsi/scsi_host.h>
27 #include <scsi/scsi_transport_fc.h>
28 #include <scsi/scsi_bsg_fc.h>
29 #include <scsi/fc/fc_fs.h>
34 #include "lpfc_sli4.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_vport.h"
43 #include "lpfc_version.h"
/*
 * NOTE(review): this block is a corrupted extraction — statements are split
 * across lines, the upstream file's line numbers are fused into the text,
 * and several statements (allocation-failure checks, braces, returns, the
 * local variable declarations for cmd/rsp/rc/timeout/request_nseg/
 * reply_nseg/numbde/busaddr) are missing.  Code is left byte-identical;
 * only comments are added.  TODO: restore from pristine upstream
 * drivers/scsi/lpfc/lpfc_bsg.c before building.
 *
 * Purpose (from the visible calls): build a CMD_GEN_REQUEST64_CR IOCB whose
 * BDE list (bpl) describes the bsg request/reply scatterlists, issue it
 * synchronously on the ELS ring via lpfc_sli_issue_iocb_wait(), then unmap
 * the DMA mappings, free the buffer list and IOCBs, and report the result
 * in job->reply.
 */
46 * lpfc_bsg_rport_ct - send a CT command from a bsg request
47 * @job: fc_bsg_job to handle
50 lpfc_bsg_rport_ct(struct fc_bsg_job
*job
)
52 struct Scsi_Host
*shost
= job
->shost
;
53 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
54 struct lpfc_hba
*phba
= vport
->phba
;
55 struct lpfc_rport_data
*rdata
= job
->rport
->dd_data
;
56 struct lpfc_nodelist
*ndlp
= rdata
->pnode
;
57 struct ulp_bde64
*bpl
= NULL
;
59 struct lpfc_iocbq
*cmdiocbq
= NULL
;
60 struct lpfc_iocbq
*rspiocbq
= NULL
;
63 struct lpfc_dmabuf
*bmp
= NULL
;
66 struct scatterlist
*sgel
= NULL
;
71 /* in case no data is transferred */
72 job
->reply
->reply_payload_rcv_len
= 0;
/* Take a reference on the node; bail out with -ENODEV if it is gone. */
74 if (!lpfc_nlp_get(ndlp
)) {
75 job
->reply
->result
= -ENODEV
;
/* Node busy with an outstanding ELS command — cannot issue now. */
79 if (ndlp
->nlp_flag
& NLP_ELS_SND_MASK
) {
/* Allocate command and response IOCBs and the buffer-list dmabuf. */
84 spin_lock_irq(shost
->host_lock
);
85 cmdiocbq
= lpfc_sli_get_iocbq(phba
);
88 spin_unlock_irq(shost
->host_lock
);
91 cmd
= &cmdiocbq
->iocb
;
93 rspiocbq
= lpfc_sli_get_iocbq(phba
);
98 spin_unlock_irq(shost
->host_lock
);
100 rsp
= &rspiocbq
->iocb
;
102 bmp
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
105 spin_lock_irq(shost
->host_lock
);
109 spin_lock_irq(shost
->host_lock
);
110 bmp
->virt
= lpfc_mbuf_alloc(phba
, 0, &bmp
->phys
);
115 spin_unlock_irq(shost
->host_lock
);
117 INIT_LIST_HEAD(&bmp
->list
);
118 bpl
= (struct ulp_bde64
*) bmp
->virt
;
/*
 * DMA-map the request scatterlist and describe each segment with a
 * 64-bit BDE (little-endian on the wire, hence the cpu_to_le32 swizzles).
 */
120 request_nseg
= pci_map_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
121 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
122 for_each_sg(job
->request_payload
.sg_list
, sgel
, request_nseg
, numbde
) {
123 busaddr
= sg_dma_address(sgel
);
124 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
125 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
126 bpl
->tus
.w
= cpu_to_le32(bpl
->tus
.w
);
127 bpl
->addrLow
= cpu_to_le32(putPaddrLow(busaddr
));
128 bpl
->addrHigh
= cpu_to_le32(putPaddrHigh(busaddr
));
/* Same for the reply scatterlist, using input (BDE_64I) descriptors. */
132 reply_nseg
= pci_map_sg(phba
->pcidev
, job
->reply_payload
.sg_list
,
133 job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
134 for_each_sg(job
->reply_payload
.sg_list
, sgel
, reply_nseg
, numbde
) {
135 busaddr
= sg_dma_address(sgel
);
136 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64I
;
137 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
138 bpl
->tus
.w
= cpu_to_le32(bpl
->tus
.w
);
139 bpl
->addrLow
= cpu_to_le32(putPaddrLow(busaddr
));
140 bpl
->addrHigh
= cpu_to_le32(putPaddrHigh(busaddr
));
/*
 * Fill in the GEN_REQUEST64 IOCB: the bdl points at the BDE list built
 * above; the w5 header fields mark this as an unsolicited CT frame.
 */
144 cmd
->un
.genreq64
.bdl
.ulpIoTag32
= 0;
145 cmd
->un
.genreq64
.bdl
.addrHigh
= putPaddrHigh(bmp
->phys
);
146 cmd
->un
.genreq64
.bdl
.addrLow
= putPaddrLow(bmp
->phys
);
147 cmd
->un
.genreq64
.bdl
.bdeFlags
= BUFF_TYPE_BLP_64
;
148 cmd
->un
.genreq64
.bdl
.bdeSize
=
149 (request_nseg
+ reply_nseg
) * sizeof(struct ulp_bde64
);
150 cmd
->ulpCommand
= CMD_GEN_REQUEST64_CR
;
151 cmd
->un
.genreq64
.w5
.hcsw
.Fctl
= (SI
| LA
);
152 cmd
->un
.genreq64
.w5
.hcsw
.Dfctl
= 0;
153 cmd
->un
.genreq64
.w5
.hcsw
.Rctl
= FC_RCTL_DD_UNSOL_CTL
;
154 cmd
->un
.genreq64
.w5
.hcsw
.Type
= FC_TYPE_CT
;
155 cmd
->ulpBdeCount
= 1;
157 cmd
->ulpClass
= CLASS3
;
158 cmd
->ulpContext
= ndlp
->nlp_rpi
;
159 cmd
->ulpOwner
= OWN_CHIP
;
160 cmdiocbq
->vport
= phba
->pport
;
161 cmdiocbq
->context1
= NULL
;
162 cmdiocbq
->context2
= NULL
;
163 cmdiocbq
->iocb_flag
|= LPFC_IO_LIBDFC
;
/* Timeout is twice the fabric R_A_TOV plus a driver-side margin. */
165 timeout
= phba
->fc_ratov
* 2;
166 job
->dd_data
= cmdiocbq
;
/* Issue the IOCB and wait for completion (or timeout). */
168 rc
= lpfc_sli_issue_iocb_wait(phba
, LPFC_ELS_RING
, cmdiocbq
, rspiocbq
,
169 timeout
+ LPFC_DRVR_TIMEOUT
);
/*
 * On timeout the IOCB may still be in flight, so the DMA mappings must
 * stay alive; only unmap when the wait did not time out.
 */
171 if (rc
!= IOCB_TIMEDOUT
) {
172 pci_unmap_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
173 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
174 pci_unmap_sg(phba
->pcidev
, job
->reply_payload
.sg_list
,
175 job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
178 if (rc
== IOCB_TIMEDOUT
) {
179 lpfc_sli_release_iocbq(phba
, rspiocbq
);
184 if (rc
!= IOCB_SUCCESS
) {
/* Map firmware completion status to an error; word 4 holds the reason. */
189 if (rsp
->ulpStatus
) {
190 if (rsp
->ulpStatus
== IOSTAT_LOCAL_REJECT
) {
191 switch (rsp
->un
.ulpWord
[4] & 0xff) {
192 case IOERR_SEQUENCE_TIMEOUT
:
195 case IOERR_INVALID_RPI
:
/* Success: report how many reply bytes the adapter transferred. */
205 job
->reply
->reply_payload_rcv_len
=
206 rsp
->un
.genreq64
.bdl
.bdeSize
;
/* Common cleanup: free the buffer list and both IOCBs. */
209 spin_lock_irq(shost
->host_lock
);
210 lpfc_mbuf_free(phba
, bmp
->virt
, bmp
->phys
);
214 lpfc_sli_release_iocbq(phba
, rspiocbq
);
216 lpfc_sli_release_iocbq(phba
, cmdiocbq
);
217 spin_unlock_irq(shost
->host_lock
);
221 /* make error code available to userspace */
222 job
->reply
->result
= rc
;
223 /* complete the job back to userspace */
/*
 * NOTE(review): corrupted extraction — statements split across lines,
 * upstream line numbers fused in, and several statements missing (error
 * checks, braces, returns, and the declarations of elscmd/cmdsize/rspsize/
 * rpi/iocb_status/rc/request_nseg/reply_nseg/numbde/busaddr/rsp).  Code is
 * left byte-identical; comments only.  TODO: restore from pristine
 * upstream drivers/scsi/lpfc/lpfc_bsg.c.
 *
 * Purpose (from the visible calls): prepare an ELS IOCB with
 * lpfc_prep_els_iocb(), replace its driver-allocated payload buffers with
 * BDEs describing the bsg request/reply scatterlists, issue it
 * synchronously on the ELS ring, and translate IOSTAT_SUCCESS /
 * IOSTAT_LS_RJT completions into the fc_bsg ctels_reply for userspace.
 */
230 * lpfc_bsg_rport_els - send an ELS command from a bsg request
231 * @job: fc_bsg_job to handle
234 lpfc_bsg_rport_els(struct fc_bsg_job
*job
)
236 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
237 struct lpfc_hba
*phba
= vport
->phba
;
238 struct lpfc_rport_data
*rdata
= job
->rport
->dd_data
;
239 struct lpfc_nodelist
*ndlp
= rdata
->pnode
;
244 struct lpfc_iocbq
*rspiocbq
;
245 struct lpfc_iocbq
*cmdiocbq
;
248 struct lpfc_dmabuf
*pcmd
;
249 struct lpfc_dmabuf
*prsp
;
250 struct lpfc_dmabuf
*pbuflist
= NULL
;
251 struct ulp_bde64
*bpl
;
255 struct scatterlist
*sgel
= NULL
;
260 /* in case no data is transferred */
261 job
->reply
->reply_payload_rcv_len
= 0;
/* Hold a node reference for the duration of the command. */
263 if (!lpfc_nlp_get(ndlp
)) {
/* ELS opcode and payload sizes come straight from the bsg request. */
268 elscmd
= job
->request
->rqst_data
.r_els
.els_code
;
269 cmdsize
= job
->request_payload
.payload_len
;
270 rspsize
= job
->reply_payload
.payload_len
;
271 rspiocbq
= lpfc_sli_get_iocbq(phba
);
278 rsp
= &rspiocbq
->iocb
;
/* Build a template ELS IOCB addressed to the rport's D_ID. */
281 cmdiocbq
= lpfc_prep_els_iocb(phba
->pport
, 1, cmdsize
, 0, ndlp
,
282 ndlp
->nlp_DID
, elscmd
);
285 lpfc_sli_release_iocbq(phba
, rspiocbq
);
289 job
->dd_data
= cmdiocbq
;
/*
 * lpfc_prep_els_iocb allocated its own cmd/rsp mbufs (context2 list);
 * free them — the payload will instead come from the bsg scatterlists.
 */
290 pcmd
= (struct lpfc_dmabuf
*) cmdiocbq
->context2
;
291 prsp
= (struct lpfc_dmabuf
*) pcmd
->list
.next
;
293 lpfc_mbuf_free(phba
, pcmd
->virt
, pcmd
->phys
);
295 lpfc_mbuf_free(phba
, prsp
->virt
, prsp
->phys
);
297 cmdiocbq
->context2
= NULL
;
/* Reuse the prepared buffer list (context3) to hold our own BDEs. */
299 pbuflist
= (struct lpfc_dmabuf
*) cmdiocbq
->context3
;
300 bpl
= (struct ulp_bde64
*) pbuflist
->virt
;
/* DMA-map and describe the request payload segments. */
302 request_nseg
= pci_map_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
303 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
305 for_each_sg(job
->request_payload
.sg_list
, sgel
, request_nseg
, numbde
) {
306 busaddr
= sg_dma_address(sgel
);
307 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
308 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
309 bpl
->tus
.w
= cpu_to_le32(bpl
->tus
.w
);
310 bpl
->addrLow
= cpu_to_le32(putPaddrLow(busaddr
));
311 bpl
->addrHigh
= cpu_to_le32(putPaddrHigh(busaddr
));
/* DMA-map and describe the reply payload segments (input BDEs). */
315 reply_nseg
= pci_map_sg(phba
->pcidev
, job
->reply_payload
.sg_list
,
316 job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
317 for_each_sg(job
->reply_payload
.sg_list
, sgel
, reply_nseg
, numbde
) {
318 busaddr
= sg_dma_address(sgel
);
319 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64I
;
320 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
321 bpl
->tus
.w
= cpu_to_le32(bpl
->tus
.w
);
322 bpl
->addrLow
= cpu_to_le32(putPaddrLow(busaddr
));
323 bpl
->addrHigh
= cpu_to_le32(putPaddrHigh(busaddr
));
/* Final IOCB fixups: total BDE size and the RPI in ulpContext. */
327 cmdiocbq
->iocb
.un
.elsreq64
.bdl
.bdeSize
=
328 (request_nseg
+ reply_nseg
) * sizeof(struct ulp_bde64
);
329 cmdiocbq
->iocb
.ulpContext
= rpi
;
330 cmdiocbq
->iocb_flag
|= LPFC_IO_LIBDFC
;
331 cmdiocbq
->context1
= NULL
;
332 cmdiocbq
->context2
= NULL
;
/* Issue synchronously; timeout = 2 * R_A_TOV + driver margin. */
334 iocb_status
= lpfc_sli_issue_iocb_wait(phba
, LPFC_ELS_RING
, cmdiocbq
,
335 rspiocbq
, (phba
->fc_ratov
* 2)
336 + LPFC_DRVR_TIMEOUT
);
338 /* release the new ndlp once the iocb completes */
/* Only unmap if the IOCB is known to be finished (not timed out). */
340 if (iocb_status
!= IOCB_TIMEDOUT
) {
341 pci_unmap_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
342 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
343 pci_unmap_sg(phba
->pcidev
, job
->reply_payload
.sg_list
,
344 job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
347 if (iocb_status
== IOCB_SUCCESS
) {
348 if (rsp
->ulpStatus
== IOSTAT_SUCCESS
) {
/* ELS accepted: report bytes received into the reply payload. */
349 job
->reply
->reply_payload_rcv_len
=
350 rsp
->un
.elsreq64
.bdl
.bdeSize
;
352 } else if (rsp
->ulpStatus
== IOSTAT_LS_RJT
) {
353 struct fc_bsg_ctels_reply
*els_reply
;
354 /* LS_RJT data returned in word 4 */
355 uint8_t *rjt_data
= (uint8_t *)&rsp
->un
.ulpWord
[4];
/* Decode the LS_RJT bytes into the transport's ctels_reply. */
357 els_reply
= &job
->reply
->reply_data
.ctels_reply
;
358 job
->reply
->result
= 0;
359 els_reply
->status
= FC_CTELS_STATUS_REJECT
;
360 els_reply
->rjt_data
.action
= rjt_data
[0];
361 els_reply
->rjt_data
.reason_code
= rjt_data
[1];
362 els_reply
->rjt_data
.reason_explanation
= rjt_data
[2];
363 els_reply
->rjt_data
.vendor_unique
= rjt_data
[3];
/* Free the ELS IOCB (unless timed out) and the response IOCB. */
369 if (iocb_status
!= IOCB_TIMEDOUT
)
370 lpfc_els_free_iocb(phba
, cmdiocbq
);
372 lpfc_sli_release_iocbq(phba
, rspiocbq
);
375 /* make error code available to userspace */
376 job
->reply
->result
= rc
;
377 /* complete the job back to userspace */
/*
 * struct lpfc_ct_event - per-waiter bookkeeping for unsolicited CT events.
 *
 * NOTE(review): corrupted extraction; some members (e.g. the refcount and
 * type/req_id/reg_id identifiers used elsewhere in this file) are missing
 * from this view.  Lives on phba->ct_ev_waiters (see the list walkers
 * below); waiters sleep on @wq until events arrive on @events_to_see.
 */
383 struct lpfc_ct_event
{
384 struct list_head node
;
386 wait_queue_head_t wq
;
388 /* Event type and waiter identifiers */
393 /* next two flags are here for the auto-delete logic */
394 unsigned long wait_time_stamp
;
397 /* seen and not seen events */
398 struct list_head events_to_get
;
399 struct list_head events_to_see
;
/*
 * NOTE(review): orphaned fragment — presumably the `node` linkage member
 * of struct event_data (its header and other members are missing from
 * this extraction; lpfc_ct_event_free() below walks event_data entries by
 * this `node` field).  TODO: confirm against upstream.
 */
403 struct list_head node
;
/*
 * lpfc_ct_event_new - allocate and initialise a CT event waiter.
 * @ev_reg_id: registration id identifying the waiter
 * @ev_req_id: CT FsType this waiter wants to receive
 *
 * Zero-allocates the structure, initialises both event lists and the wait
 * queue, and stamps the creation time.  NOTE(review): corrupted
 * extraction — the NULL-check after kzalloc and the return statement are
 * missing from this view; code left byte-identical.
 */
410 static struct lpfc_ct_event
*
411 lpfc_ct_event_new(int ev_reg_id
, uint32_t ev_req_id
)
413 struct lpfc_ct_event
*evt
= kzalloc(sizeof(*evt
), GFP_KERNEL
);
417 INIT_LIST_HEAD(&evt
->events_to_get
);
418 INIT_LIST_HEAD(&evt
->events_to_see
);
419 evt
->req_id
= ev_req_id
;
420 evt
->reg_id
= ev_reg_id
;
421 evt
->wait_time_stamp
= jiffies
;
422 init_waitqueue_head(&evt
->wq
);
/*
 * lpfc_ct_event_free - unlink a waiter and drain its queued events.
 * @evt: the waiter to destroy
 *
 * Removes @evt from the waiter list, then pops every event_data entry off
 * both the events_to_get and events_to_see lists.  NOTE(review): corrupted
 * extraction — the per-entry free calls inside each while loop and the
 * final kfree of @evt are missing from this view; code left byte-identical.
 */
428 lpfc_ct_event_free(struct lpfc_ct_event
*evt
)
430 struct event_data
*ed
;
432 list_del(&evt
->node
);
434 while (!list_empty(&evt
->events_to_get
)) {
435 ed
= list_entry(evt
->events_to_get
.next
, typeof(*ed
), node
);
441 while (!list_empty(&evt
->events_to_see
)) {
442 ed
= list_entry(evt
->events_to_see
.next
, typeof(*ed
), node
);
/*
 * lpfc_ct_event_ref - take a reference on a CT event waiter.
 * NOTE(review): the function body is missing from this corrupted
 * extraction (presumably a refcount increment — TODO confirm upstream).
 */
452 lpfc_ct_event_ref(struct lpfc_ct_event
*evt
)
/*
 * lpfc_ct_event_unref - drop a reference; frees the waiter when it hits
 * zero via lpfc_ct_event_free().  NOTE(review): the refcount decrement /
 * zero test is missing from this corrupted extraction — only the free
 * call survives.  TODO: confirm against upstream.
 */
458 lpfc_ct_event_unref(struct lpfc_ct_event
*evt
)
461 lpfc_ct_event_free(evt
);
/* CT FsType value used for Emulex diagnostic loopback traffic. */
464 #define SLI_CT_ELX_LOOPBACK 0x10
/*
 * NOTE(review): enum is truncated in this extraction — only the first
 * enumerator and the opening brace survive; remaining members and the
 * closing brace are missing.  TODO: restore from upstream.
 */
466 enum ELX_LOOPBACK_CMD
{
467 ELX_LOOPBACK_XRI_SETUP
,
/*
 * NOTE(review): heavily corrupted extraction — statements split across
 * lines, upstream line numbers fused in, and many statements missing
 * (switch header, memcpy source arguments, continue/break statements,
 * declarations of cmd/len/i/size/offset/flags/dma_addr, loop braces).
 * Code left byte-identical; comments only.  TODO: restore from pristine
 * upstream drivers/scsi/lpfc/lpfc_bsg.c.
 *
 * Purpose (from the visible calls): on receipt of an unsolicited CT
 * frame, find every registered waiter whose req_id matches the frame's
 * FsType, copy the (possibly multi-iocbq) payload into a freshly
 * allocated event_data, queue it on the waiter's events_to_see list and
 * wake the waiter.  SLI-4 additionally records oxid/SID in phba->ct_ctx
 * so a response can be sent later.
 */
472 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
477 * This function is called when an unsolicited CT command is received. It
478 * forwards the event to any processes registerd to receive CT events.
481 lpfc_bsg_ct_unsol_event(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
,
482 struct lpfc_iocbq
*piocbq
)
484 uint32_t evt_req_id
= 0;
487 struct lpfc_dmabuf
*dmabuf
= NULL
;
488 struct lpfc_ct_event
*evt
;
489 struct event_data
*evt_dat
= NULL
;
490 struct lpfc_iocbq
*iocbq
;
492 struct list_head head
;
493 struct ulp_bde64
*bde
;
496 struct lpfc_dmabuf
*bdeBuf1
= piocbq
->context2
;
497 struct lpfc_dmabuf
*bdeBuf2
= piocbq
->context3
;
498 struct lpfc_hbq_entry
*hbqe
;
499 struct lpfc_sli_ct_request
*ct_req
;
/* Chain the incoming iocbq(s) onto a local list head for iteration. */
502 INIT_LIST_HEAD(&head
);
503 list_add_tail(&head
, &piocbq
->list
);
/* Nothing to do if the frame carries no payload. */
505 if (piocbq
->iocb
.ulpBdeCount
== 0 ||
506 piocbq
->iocb
.un
.cont64
[0].tus
.f
.bdeSize
== 0)
507 goto error_ct_unsol_exit
;
/* HBQ mode gets the buffer from context2; ring mode looks it up by PA. */
509 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
)
512 dma_addr
= getPaddr(piocbq
->iocb
.un
.cont64
[0].addrHigh
,
513 piocbq
->iocb
.un
.cont64
[0].addrLow
);
514 dmabuf
= lpfc_sli_ringpostbuf_get(phba
, pring
, dma_addr
);
/* Peek at the CT header to learn which waiters want this frame. */
517 ct_req
= (struct lpfc_sli_ct_request
*)dmabuf
->virt
;
518 evt_req_id
= ct_req
->FsType
;
519 cmd
= ct_req
->CommandResponse
.bits
.CmdRsp
;
520 len
= ct_req
->CommandResponse
.bits
.Size
;
521 if (!(phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
))
522 lpfc_sli_ringpostbuf_put(phba
, pring
, dmabuf
);
/* Walk registered waiters; deliver to each matching req_id. */
524 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
525 list_for_each_entry(evt
, &phba
->ct_ev_waiters
, node
) {
526 if (evt
->req_id
!= evt_req_id
)
529 lpfc_ct_event_ref(evt
);
531 evt_dat
= kzalloc(sizeof(*evt_dat
), GFP_KERNEL
);
533 lpfc_ct_event_unref(evt
);
534 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
535 "2614 Memory allocation failed for "
540 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
/* Compute total payload length across all chained iocbqs. */
542 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
543 /* take accumulated byte count from the last iocbq */
544 iocbq
= list_entry(head
.prev
, typeof(*iocbq
), list
);
545 evt_dat
->len
= iocbq
->iocb
.unsli3
.rcvsli3
.acc_len
;
547 list_for_each_entry(iocbq
, &head
, list
) {
548 for (i
= 0; i
< iocbq
->iocb
.ulpBdeCount
; i
++)
550 iocbq
->iocb
.un
.cont64
[i
].tus
.f
.bdeSize
;
554 evt_dat
->data
= kzalloc(evt_dat
->len
, GFP_KERNEL
);
555 if (!evt_dat
->data
) {
556 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
557 "2615 Memory allocation failed for "
558 "CT event data, size %d\n",
561 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
562 lpfc_ct_event_unref(evt
);
563 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
564 goto error_ct_unsol_exit
;
/* Copy every BDE of every chained iocbq into evt_dat->data. */
567 list_for_each_entry(iocbq
, &head
, list
) {
568 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
569 bdeBuf1
= iocbq
->context2
;
570 bdeBuf2
= iocbq
->context3
;
572 for (i
= 0; i
< iocbq
->iocb
.ulpBdeCount
; i
++) {
574 if (phba
->sli3_options
&
575 LPFC_SLI3_HBQ_ENABLED
) {
577 hbqe
= (struct lpfc_hbq_entry
*)
578 &iocbq
->iocb
.un
.ulpWord
[0];
579 size
= hbqe
->bde
.tus
.f
.bdeSize
;
582 hbqe
= (struct lpfc_hbq_entry
*)
585 size
= hbqe
->bde
.tus
.f
.bdeSize
;
/* Clamp the copy so it never overruns the allocated buffer. */
588 if ((offset
+ size
) > evt_dat
->len
)
589 size
= evt_dat
->len
- offset
;
591 size
= iocbq
->iocb
.un
.cont64
[i
].
593 bde
= &iocbq
->iocb
.un
.cont64
[i
];
594 dma_addr
= getPaddr(bde
->addrHigh
,
596 dmabuf
= lpfc_sli_ringpostbuf_get(phba
,
600 lpfc_printf_log(phba
, KERN_ERR
,
601 LOG_LIBDFC
, "2616 No dmabuf "
602 "found for iocbq 0x%p\n",
604 kfree(evt_dat
->data
);
606 spin_lock_irqsave(&phba
->ct_ev_lock
,
608 lpfc_ct_event_unref(evt
);
609 spin_unlock_irqrestore(
610 &phba
->ct_ev_lock
, flags
);
611 goto error_ct_unsol_exit
;
613 memcpy((char *)(evt_dat
->data
) + offset
,
/* Non-loopback ring buffers go straight back to the adapter. */
616 if (evt_req_id
!= SLI_CT_ELX_LOOPBACK
&&
617 !(phba
->sli3_options
&
618 LPFC_SLI3_HBQ_ENABLED
)) {
619 lpfc_sli_ringpostbuf_put(phba
, pring
,
/* Loopback frames: recycle or free the buffer per command type. */
623 case ELX_LOOPBACK_XRI_SETUP
:
624 if (!(phba
->sli3_options
&
625 LPFC_SLI3_HBQ_ENABLED
))
626 lpfc_post_buffer(phba
,
630 lpfc_in_buf_free(phba
,
634 if (!(phba
->sli3_options
&
635 LPFC_SLI3_HBQ_ENABLED
))
636 lpfc_post_buffer(phba
,
/* Record SLI-4 exchange context (oxid/SID) so a reply can be sent. */
645 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
646 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
647 evt_dat
->immed_dat
= phba
->ctx_idx
;
648 phba
->ctx_idx
= (phba
->ctx_idx
+ 1) % 64;
649 phba
->ct_ctx
[evt_dat
->immed_dat
].oxid
=
650 piocbq
->iocb
.ulpContext
;
651 phba
->ct_ctx
[evt_dat
->immed_dat
].SID
=
652 piocbq
->iocb
.un
.rcvels
.remoteID
;
654 evt_dat
->immed_dat
= piocbq
->iocb
.ulpContext
;
/* Queue the event on the waiter and wake it up. */
656 evt_dat
->type
= FC_REG_CT_EVENT
;
657 list_add(&evt_dat
->node
, &evt
->events_to_see
);
658 wake_up_interruptible(&evt
->wq
);
659 lpfc_ct_event_unref(evt
);
660 if (evt_req_id
== SLI_CT_ELX_LOOPBACK
)
663 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
666 if (!list_empty(&head
))
/*
 * NOTE(review): corrupted extraction — statements split across lines,
 * upstream line numbers fused in, and several statements missing (the
 * declarations of rc/flags, early returns, braces, allocation-failure
 * handling).  Code left byte-identical; comments only.  TODO: restore
 * from pristine upstream drivers/scsi/lpfc/lpfc_bsg.c.
 *
 * Purpose (from the visible calls): register (or reuse) a CT event waiter
 * for the caller's reg_id, then block in wait_event_interruptible() until
 * an unsolicited CT event lands on events_to_see, at which point the
 * event is moved to events_to_get for a later GET_EVENT to collect.
 */
673 * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command
674 * @job: SET_EVENT fc_bsg_job
677 lpfc_bsg_set_event(struct fc_bsg_job
*job
)
679 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
680 struct lpfc_hba
*phba
= vport
->phba
;
681 struct set_ct_event
*event_req
;
682 struct lpfc_ct_event
*evt
;
/* Reject requests too small to contain a set_ct_event payload. */
686 if (job
->request_len
<
687 sizeof(struct fc_bsg_request
) + sizeof(struct set_ct_event
)) {
688 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
689 "2612 Received SET_CT_EVENT below minimum "
694 event_req
= (struct set_ct_event
*)
695 job
->request
->rqst_data
.h_vendor
.vendor_cmd
;
/* Look for an existing waiter with this registration id. */
697 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
698 list_for_each_entry(evt
, &phba
->ct_ev_waiters
, node
) {
699 if (evt
->reg_id
== event_req
->ev_reg_id
) {
700 lpfc_ct_event_ref(evt
);
701 evt
->wait_time_stamp
= jiffies
;
705 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
/* list_for_each_entry ran off the end => no waiter yet; create one. */
707 if (&evt
->node
== &phba
->ct_ev_waiters
) {
708 /* no event waiting struct yet - first call */
709 evt
= lpfc_ct_event_new(event_req
->ev_reg_id
,
710 event_req
->ev_req_id
);
712 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
713 "2617 Failed allocation of event "
718 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
719 list_add(&evt
->node
, &phba
->ct_ev_waiters
);
720 lpfc_ct_event_ref(evt
);
721 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
/*
 * Sleep until an event arrives; a signal wakes us with non-zero, in
 * which case both references (ours and the registration) are dropped.
 */
725 if (wait_event_interruptible(evt
->wq
,
726 !list_empty(&evt
->events_to_see
))) {
727 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
728 lpfc_ct_event_unref(evt
); /* release ref */
729 lpfc_ct_event_unref(evt
); /* delete */
730 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
735 evt
->wait_time_stamp
= jiffies
;
/* Move the oldest seen event over to the "to get" list. */
738 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
739 list_move(evt
->events_to_see
.prev
, &evt
->events_to_get
);
740 lpfc_ct_event_unref(evt
); /* release ref */
741 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
744 /* set_event carries no reply payload */
745 job
->reply
->reply_payload_rcv_len
= 0;
746 /* make error code available to userspace */
747 job
->reply
->result
= rc
;
748 /* complete the job back to userspace */
/*
 * NOTE(review): corrupted extraction — statements split across lines,
 * upstream line numbers fused in, and several statements missing (the
 * declarations of rc/flags, early returns, braces, else arms).  Code left
 * byte-identical; comments only.  TODO: restore from pristine upstream
 * drivers/scsi/lpfc/lpfc_bsg.c.
 *
 * Purpose (from the visible calls): find the waiter matching the caller's
 * reg_id, dequeue the oldest event_data from its events_to_get list, copy
 * the event payload into the bsg reply scatterlist (truncating to the
 * reply buffer size if necessary), then free the event data and drop the
 * waiter reference.
 */
755 * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command
756 * @job: GET_EVENT fc_bsg_job
759 lpfc_bsg_get_event(struct fc_bsg_job
*job
)
761 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
762 struct lpfc_hba
*phba
= vport
->phba
;
763 struct get_ct_event
*event_req
;
764 struct get_ct_event_reply
*event_reply
;
765 struct lpfc_ct_event
*evt
;
766 struct event_data
*evt_dat
= NULL
;
/* Reject requests too small to contain a get_ct_event payload. */
770 if (job
->request_len
<
771 sizeof(struct fc_bsg_request
) + sizeof(struct get_ct_event
)) {
772 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
773 "2613 Received GET_CT_EVENT request below "
778 event_req
= (struct get_ct_event
*)
779 job
->request
->rqst_data
.h_vendor
.vendor_cmd
;
781 event_reply
= (struct get_ct_event_reply
*)
782 job
->reply
->reply_data
.vendor_reply
.vendor_rsp
;
/* Find the matching waiter and detach its oldest queued event. */
784 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
785 list_for_each_entry(evt
, &phba
->ct_ev_waiters
, node
) {
786 if (evt
->reg_id
== event_req
->ev_reg_id
) {
787 if (list_empty(&evt
->events_to_get
))
789 lpfc_ct_event_ref(evt
);
790 evt
->wait_time_stamp
= jiffies
;
791 evt_dat
= list_entry(evt
->events_to_get
.prev
,
792 struct event_data
, node
);
793 list_del(&evt_dat
->node
);
797 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
/* No event available: report zero payload and bail out. */
800 job
->reply
->reply_payload_rcv_len
= 0;
802 goto error_get_event_exit
;
/* Truncate if the caller's reply buffer is smaller than the event. */
805 if (evt_dat
->len
> job
->reply_payload
.payload_len
) {
806 evt_dat
->len
= job
->reply_payload
.payload_len
;
807 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
808 "2618 Truncated event data at %d "
810 job
->reply_payload
.payload_len
);
/* Hand back the immediate data (exchange context) and the payload. */
813 event_reply
->immed_data
= evt_dat
->immed_dat
;
815 if (evt_dat
->len
> 0)
816 job
->reply
->reply_payload_rcv_len
=
817 sg_copy_from_buffer(job
->reply_payload
.sg_list
,
818 job
->reply_payload
.sg_cnt
,
819 evt_dat
->data
, evt_dat
->len
);
821 job
->reply
->reply_payload_rcv_len
= 0;
/* Event consumed: free its data and drop our waiter reference. */
825 kfree(evt_dat
->data
);
827 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
828 lpfc_ct_event_unref(evt
);
829 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
831 error_get_event_exit
:
832 /* make error code available to userspace */
833 job
->reply
->result
= rc
;
834 /* complete the job back to userspace */
/*
 * Dispatch a vendor-specific bsg command on the first vendor_cmd word.
 * NOTE(review): corrupted extraction — the switch statement header, the
 * default case and the closing return are missing from this view; code
 * left byte-identical.
 */
841 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
842 * @job: fc_bsg_job to handle
845 lpfc_bsg_hst_vendor(struct fc_bsg_job
*job
)
847 int command
= job
->request
->rqst_data
.h_vendor
.vendor_cmd
[0];
850 case LPFC_BSG_VENDOR_SET_CT_EVENT
:
851 return lpfc_bsg_set_event(job
);
854 case LPFC_BSG_VENDOR_GET_CT_EVENT
:
855 return lpfc_bsg_get_event(job
);
/*
 * Top-level bsg entry point: route the job by its msgcode to the vendor,
 * rport-ELS or rport-CT handler.  NOTE(review): corrupted extraction —
 * the switch header, the FC_BSG_RPT_ELS/FC_BSG_RPT_CT case labels, the
 * default case and the return are missing from this view; code left
 * byte-identical.
 */
864 * lpfc_bsg_request - handle a bsg request from the FC transport
865 * @job: fc_bsg_job to handle
868 lpfc_bsg_request(struct fc_bsg_job
*job
)
873 msgcode
= job
->request
->msgcode
;
876 case FC_BSG_HST_VENDOR
:
877 rc
= lpfc_bsg_hst_vendor(job
);
880 rc
= lpfc_bsg_rport_els(job
);
883 rc
= lpfc_bsg_rport_ct(job
);
893 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
894 * @job: fc_bsg_job that has timed out
896 * This function just aborts the job's IOCB. The aborted IOCB will return to
897 * the waiting function which will handle passing the error back to userspace
900 lpfc_bsg_timeout(struct fc_bsg_job
*job
)
902 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
903 struct lpfc_hba
*phba
= vport
->phba
;
904 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*)job
->dd_data
;
905 struct lpfc_sli_ring
*pring
= &phba
->sli
.ring
[LPFC_ELS_RING
];
908 lpfc_sli_issue_abort_iotag(phba
, pring
, cmdiocb
);