[SCSI] lpfc 8.3.8: (BSG1) Update BSG infrastructure
Source tree: linux-2.6 (linux-acpi-2.6/ibm-acpi-2.6.git) — drivers/scsi/lpfc/lpfc_bsg.c
blob: dfb1f73252a18bcb3cf687346adaa8bfe23b9212
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
21 #include <linux/interrupt.h>
22 #include <linux/mempool.h>
23 #include <linux/pci.h>
25 #include <scsi/scsi.h>
26 #include <scsi/scsi_host.h>
27 #include <scsi/scsi_transport_fc.h>
28 #include <scsi/scsi_bsg_fc.h>
29 #include <scsi/fc/fc_fs.h>
31 #include "lpfc_hw4.h"
32 #include "lpfc_hw.h"
33 #include "lpfc_sli.h"
34 #include "lpfc_sli4.h"
35 #include "lpfc_nl.h"
36 #include "lpfc_bsg.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
39 #include "lpfc.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_vport.h"
43 #include "lpfc_version.h"
/**
 * lpfc_bsg_rport_ct - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 *
 * Builds a GEN_REQUEST64_CR IOCB whose BDE list points at the bsg
 * request/reply scatterlists, issues it synchronously on the ELS ring,
 * and completes @job with the result.  Always returns 0; the outcome is
 * reported to userspace through job->reply->result.
 */
static int
lpfc_bsg_rport_ct(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq *rspiocbq = NULL;
	IOCB_t *cmd;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp = NULL;	/* holds the BDE list page */
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* Take a node reference for the duration of the command */
	if (!lpfc_nlp_get(ndlp)) {
		job->reply->result = -ENODEV;
		return 0;
	}

	/* Don't race an ELS command already outstanding to this node */
	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_ndlp_exit;
	}

	spin_lock_irq(shost->host_lock);
	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		spin_unlock_irq(shost->host_lock);
		goto free_ndlp_exit;
	}
	cmd = &cmdiocbq->iocb;

	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	spin_unlock_irq(shost->host_lock);

	rsp = &rspiocbq->iocb;

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		/* free_rspiocbq path unlocks, so re-take the lock first */
		spin_lock_irq(shost->host_lock);
		goto free_rspiocbq;
	}

	/*
	 * NOTE(review): lpfc_mbuf_alloc() is called under host_lock here —
	 * confirm it cannot sleep in this configuration (pool allocation
	 * with a sleeping gfp under a spinlock would be a bug).
	 */
	spin_lock_irq(shost->host_lock);
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	spin_unlock_irq(shost->host_lock);

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;

	/* Map the request payload and describe it with 64-bit BDEs */
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	/* Map the reply payload; BDE_64I marks input (receive) buffers */
	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	/* Fill in the GEN_REQUEST64 IOCB pointing at the BDE list (bmp) */
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context1 = NULL;
	cmdiocbq->context2 = NULL;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;

	timeout = phba->fc_ratov * 2;
	job->dd_data = cmdiocbq;	/* lets lpfc_bsg_timeout() find the IOCB */

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
				      timeout + LPFC_DRVR_TIMEOUT);

	/* On timeout the IOCB is still owned by the firmware: the DMA
	 * mappings and cmdiocbq must not be torn down here. */
	if (rc != IOCB_TIMEDOUT) {
		pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
		pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (rc == IOCB_TIMEDOUT) {
		lpfc_sli_release_iocbq(phba, rspiocbq);
		rc = -EACCES;
		goto free_ndlp_exit;
	}

	if (rc != IOCB_SUCCESS) {
		rc = -EACCES;
		goto free_outdmp;
	}

	if (rsp->ulpStatus) {
		/* Map firmware local-reject reasons to errno values */
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
			goto free_outdmp;
		}
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	/* Partial-unwind ladder: fall through releases everything acquired */
free_outdmp:
	spin_lock_irq(shost->host_lock);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
free_bmp:
	kfree(bmp);
free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	spin_unlock_irq(shost->host_lock);
free_ndlp_exit:
	lpfc_nlp_put(ndlp);

	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	return 0;
}
230 * lpfc_bsg_rport_els - send an ELS command from a bsg request
231 * @job: fc_bsg_job to handle
233 static int
234 lpfc_bsg_rport_els(struct fc_bsg_job *job)
236 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
237 struct lpfc_hba *phba = vport->phba;
238 struct lpfc_rport_data *rdata = job->rport->dd_data;
239 struct lpfc_nodelist *ndlp = rdata->pnode;
241 uint32_t elscmd;
242 uint32_t cmdsize;
243 uint32_t rspsize;
244 struct lpfc_iocbq *rspiocbq;
245 struct lpfc_iocbq *cmdiocbq;
246 IOCB_t *rsp;
247 uint16_t rpi = 0;
248 struct lpfc_dmabuf *pcmd;
249 struct lpfc_dmabuf *prsp;
250 struct lpfc_dmabuf *pbuflist = NULL;
251 struct ulp_bde64 *bpl;
252 int iocb_status;
253 int request_nseg;
254 int reply_nseg;
255 struct scatterlist *sgel = NULL;
256 int numbde;
257 dma_addr_t busaddr;
258 int rc = 0;
260 /* in case no data is transferred */
261 job->reply->reply_payload_rcv_len = 0;
263 if (!lpfc_nlp_get(ndlp)) {
264 rc = -ENODEV;
265 goto out;
268 elscmd = job->request->rqst_data.r_els.els_code;
269 cmdsize = job->request_payload.payload_len;
270 rspsize = job->reply_payload.payload_len;
271 rspiocbq = lpfc_sli_get_iocbq(phba);
272 if (!rspiocbq) {
273 lpfc_nlp_put(ndlp);
274 rc = -ENOMEM;
275 goto out;
278 rsp = &rspiocbq->iocb;
279 rpi = ndlp->nlp_rpi;
281 cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp,
282 ndlp->nlp_DID, elscmd);
284 if (!cmdiocbq) {
285 lpfc_sli_release_iocbq(phba, rspiocbq);
286 return -EIO;
289 job->dd_data = cmdiocbq;
290 pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
291 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
293 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
294 kfree(pcmd);
295 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
296 kfree(prsp);
297 cmdiocbq->context2 = NULL;
299 pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
300 bpl = (struct ulp_bde64 *) pbuflist->virt;
302 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
303 job->request_payload.sg_cnt, DMA_TO_DEVICE);
305 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
306 busaddr = sg_dma_address(sgel);
307 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
308 bpl->tus.f.bdeSize = sg_dma_len(sgel);
309 bpl->tus.w = cpu_to_le32(bpl->tus.w);
310 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
311 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
312 bpl++;
315 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
316 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
317 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
318 busaddr = sg_dma_address(sgel);
319 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
320 bpl->tus.f.bdeSize = sg_dma_len(sgel);
321 bpl->tus.w = cpu_to_le32(bpl->tus.w);
322 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
323 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
324 bpl++;
327 cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
328 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
329 cmdiocbq->iocb.ulpContext = rpi;
330 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
331 cmdiocbq->context1 = NULL;
332 cmdiocbq->context2 = NULL;
334 iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
335 rspiocbq, (phba->fc_ratov * 2)
336 + LPFC_DRVR_TIMEOUT);
338 /* release the new ndlp once the iocb completes */
339 lpfc_nlp_put(ndlp);
340 if (iocb_status != IOCB_TIMEDOUT) {
341 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
342 job->request_payload.sg_cnt, DMA_TO_DEVICE);
343 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
344 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
347 if (iocb_status == IOCB_SUCCESS) {
348 if (rsp->ulpStatus == IOSTAT_SUCCESS) {
349 job->reply->reply_payload_rcv_len =
350 rsp->un.elsreq64.bdl.bdeSize;
351 rc = 0;
352 } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
353 struct fc_bsg_ctels_reply *els_reply;
354 /* LS_RJT data returned in word 4 */
355 uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
357 els_reply = &job->reply->reply_data.ctels_reply;
358 job->reply->result = 0;
359 els_reply->status = FC_CTELS_STATUS_REJECT;
360 els_reply->rjt_data.action = rjt_data[0];
361 els_reply->rjt_data.reason_code = rjt_data[1];
362 els_reply->rjt_data.reason_explanation = rjt_data[2];
363 els_reply->rjt_data.vendor_unique = rjt_data[3];
364 } else
365 rc = -EIO;
366 } else
367 rc = -EIO;
369 if (iocb_status != IOCB_TIMEDOUT)
370 lpfc_els_free_iocb(phba, cmdiocbq);
372 lpfc_sli_release_iocbq(phba, rspiocbq);
374 out:
375 /* make error code available to userspace */
376 job->reply->result = rc;
377 /* complete the job back to userspace */
378 job->job_done(job);
380 return 0;
/* A registered waiter for unsolicited CT events.  Lives on
 * phba->ct_ev_waiters; lifetime is managed by lpfc_ct_event_ref/unref
 * under phba->ct_ev_lock. */
struct lpfc_ct_event {
	struct list_head node;		/* link on phba->ct_ev_waiters */
	int ref;			/* refcount; freed when it drops below 0 */
	wait_queue_head_t wq;		/* woken when an event arrives */

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;		/* CT FsType this waiter wants */
	uint32_t reg_id;		/* userspace registration id */

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;	/* jiffies of last activity */
	int waiting;			/* 1 while blocked in set_event */

	/* seen and not seen events */
	struct list_head events_to_get;	/* delivered, awaiting GET_EVENT */
	struct list_head events_to_see;	/* arrived, not yet claimed */
};
/* One captured unsolicited CT event: a copy of the received payload
 * plus enough context to reply to it later. */
struct event_data {
	struct list_head node;	/* link on events_to_see/events_to_get */
	uint32_t type;		/* FC_REG_CT_EVENT */
	uint32_t immed_dat;	/* ulpContext, or SLI4 ct_ctx index */
	void *data;		/* kmalloc'd copy of the CT payload */
	uint32_t len;		/* bytes valid in @data */
};
410 static struct lpfc_ct_event *
411 lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id)
413 struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
414 if (!evt)
415 return NULL;
417 INIT_LIST_HEAD(&evt->events_to_get);
418 INIT_LIST_HEAD(&evt->events_to_see);
419 evt->req_id = ev_req_id;
420 evt->reg_id = ev_reg_id;
421 evt->wait_time_stamp = jiffies;
422 init_waitqueue_head(&evt->wq);
424 return evt;
427 static void
428 lpfc_ct_event_free(struct lpfc_ct_event *evt)
430 struct event_data *ed;
432 list_del(&evt->node);
434 while (!list_empty(&evt->events_to_get)) {
435 ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
436 list_del(&ed->node);
437 kfree(ed->data);
438 kfree(ed);
441 while (!list_empty(&evt->events_to_see)) {
442 ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
443 list_del(&ed->node);
444 kfree(ed->data);
445 kfree(ed);
448 kfree(evt);
451 static inline void
452 lpfc_ct_event_ref(struct lpfc_ct_event *evt)
454 evt->ref++;
457 static inline void
458 lpfc_ct_event_unref(struct lpfc_ct_event *evt)
460 if (--evt->ref < 0)
461 lpfc_ct_event_free(evt);
/* CT FsType used for Emulex internal loopback traffic */
#define SLI_CT_ELX_LOOPBACK 0x10

/* Commands carried in loopback CT frames */
enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};
/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: pointer to the HBA structure
 * @pring: SLI ring the command arrived on
 * @piocbq: the unsolicited IOCB (possibly heading a chain)
 *
 * This function is called when an unsolicited CT command is received. It
 * forwards the event to any processes registerd to receive CT events:
 * the full payload is copied into an event_data and queued on every
 * matching waiter's events_to_see list.  Always returns 1.
 */
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	uint32_t len;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_ct_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;		/* local head over the iocbq chain */
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	unsigned long flags;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	/* Locate the buffer holding the CT header: HBQ mode hands it to us
	 * directly, otherwise look it up by its DMA address. */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}

	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	len = ct_req->CommandResponse.bits.Size;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->req_id != evt_req_id)
			continue;

		lpfc_ct_event_ref(evt);

		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (!evt_dat) {
			lpfc_ct_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		/* Drop the lock for the (potentially large) copy below;
		 * the ref taken above keeps evt alive meanwhile. */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (!evt_dat->data) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_ct_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		/* Walk every BDE of every IOCB in the chain and copy its
		 * fragment into evt_dat->data at the running offset. */
		list_for_each_entry(iocbq, &head, list) {
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				int size = 0;

				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						    &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					/* clamp to the announced total */
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
							  flags);
					lpfc_ct_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					/* return the buffer to the ring */
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					/* loopback/HBQ path: repost or free */
					switch (cmd) {
					case ELX_LOOPBACK_XRI_SETUP:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							/* NOTE(review): repost
							 * count reconstructed —
							 * confirm vs original */
							lpfc_post_buffer(phba,
									 pring,
									 1);
						else
							lpfc_in_buf_free(phba,
									dmabuf);
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			/* SLI4: stash oxid/SID in the ct_ctx ring so a later
			 * response can be matched; immed_dat is the index. */
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
						piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].SID =
					piocbq->iocb.un.rcvels.remoteID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		wake_up_interruptible(&evt->wq);
		lpfc_ct_event_unref(evt);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK)
			break;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);

	return 1;
}
/**
 * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 *
 * Registers (or re-uses) an event waiter for the requested reg_id, then
 * blocks until an unsolicited CT event of the requested type arrives or
 * the wait is interrupted.  On arrival the newest event is moved from
 * events_to_see to events_to_get for a later GET_EVENT to collect.
 * Returns 0 after completing @job, or a negative errno for requests
 * rejected before any waiter state was touched.
 */
static int
lpfc_bsg_set_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct set_ct_event *event_req;
	struct lpfc_ct_event *evt;
	unsigned long flags;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		return -EINVAL;
	}

	event_req = (struct set_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	/* Look for an existing waiter with this registration id */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_ct_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Loop ran off the end => no waiter yet for this reg_id */
	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		evt = lpfc_ct_event_new(event_req->ev_reg_id,
					event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			return -ENOMEM;
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_ct_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	evt->waiting = 1;
	if (wait_event_interruptible(evt->wq,
				     !list_empty(&evt->events_to_see))) {
		/* Interrupted: drop our ref AND the registration ref so the
		 * waiter is torn down. */
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		lpfc_ct_event_unref(evt); /* release ref */
		lpfc_ct_event_unref(evt); /* delete */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		rc = -EINTR;
		goto set_event_out;
	}

	evt->wait_time_stamp = jiffies;
	evt->waiting = 0;

	/* Move the newest seen event onto the to-get list for GET_EVENT */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_move(evt->events_to_see.prev, &evt->events_to_get);
	lpfc_ct_event_unref(evt); /* release ref */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

set_event_out:
	/* set_event carries no reply payload */
	job->reply->reply_payload_rcv_len = 0;
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);

	return 0;
}
/**
 * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 *
 * Dequeues the most recent collected event for the caller's reg_id,
 * copies its payload (truncated to the reply buffer size if needed)
 * into the bsg reply scatterlist, and completes @job.  Returns the
 * same rc reported in job->reply->result (-ENOENT when no event is
 * pending, -EINVAL for an undersized request, 0 on success).
 */
static int
lpfc_bsg_get_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_ct_event *evt;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		return -EINVAL;
	}

	event_req = (struct get_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	/* Pull the newest collected event for this registration, taking a
	 * ref on the waiter while we hold the detached event. */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_ct_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (!evt_dat) {
		job->reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto error_get_event_exit;
	}

	/* Truncate rather than overrun the caller's reply buffer */
	if (evt_dat->len > job->reply_payload.payload_len) {
		evt_dat->len = job->reply_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->reply_payload.payload_len);
	}

	event_reply->immed_data = evt_dat->immed_dat;

	if (evt_dat->len > 0)
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		job->reply->reply_payload_rcv_len = 0;
	rc = 0;

	/* The event was detached above; free it and drop the waiter ref */
	if (evt_dat)
		kfree(evt_dat->data);
	kfree(evt_dat);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_ct_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_get_event_exit:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);

	return rc;
}
841 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
842 * @job: fc_bsg_job to handle
844 static int
845 lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
847 int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
849 switch (command) {
850 case LPFC_BSG_VENDOR_SET_CT_EVENT:
851 return lpfc_bsg_set_event(job);
852 break;
854 case LPFC_BSG_VENDOR_GET_CT_EVENT:
855 return lpfc_bsg_get_event(job);
856 break;
858 default:
859 return -EINVAL;
864 * lpfc_bsg_request - handle a bsg request from the FC transport
865 * @job: fc_bsg_job to handle
868 lpfc_bsg_request(struct fc_bsg_job *job)
870 uint32_t msgcode;
871 int rc = -EINVAL;
873 msgcode = job->request->msgcode;
875 switch (msgcode) {
876 case FC_BSG_HST_VENDOR:
877 rc = lpfc_bsg_hst_vendor(job);
878 break;
879 case FC_BSG_RPT_ELS:
880 rc = lpfc_bsg_rport_els(job);
881 break;
882 case FC_BSG_RPT_CT:
883 rc = lpfc_bsg_rport_ct(job);
884 break;
885 default:
886 break;
889 return rc;
893 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
894 * @job: fc_bsg_job that has timed out
896 * This function just aborts the job's IOCB. The aborted IOCB will return to
897 * the waiting function which will handle passing the error back to userspace
900 lpfc_bsg_timeout(struct fc_bsg_job *job)
902 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
903 struct lpfc_hba *phba = vport->phba;
904 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data;
905 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
907 if (cmdiocb)
908 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
910 return 0;