GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / drivers / scsi / qla2xxx / qla_bsg.c
blob9067629817ea4867ac99d342c3985fa895023237
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
13 /* BSG support for ELS/CT pass through */
14 inline srb_t *
15 qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
17 srb_t *sp;
18 struct qla_hw_data *ha = vha->hw;
19 struct srb_ctx *ctx;
21 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
22 if (!sp)
23 goto done;
24 ctx = kzalloc(size, GFP_KERNEL);
25 if (!ctx) {
26 mempool_free(sp, ha->srb_mempool);
27 sp = NULL;
28 goto done;
31 memset(sp, 0, sizeof(*sp));
32 sp->fcport = fcport;
33 sp->ctx = ctx;
34 done:
35 return sp;
38 int
39 qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
41 int i, ret, num_valid;
42 uint8_t *bcode;
43 struct qla_fcp_prio_entry *pri_entry;
44 uint32_t *bcode_val_ptr, bcode_val;
46 ret = 1;
47 num_valid = 0;
48 bcode = (uint8_t *)pri_cfg;
49 bcode_val_ptr = (uint32_t *)pri_cfg;
50 bcode_val = (uint32_t)(*bcode_val_ptr);
52 if (bcode_val == 0xFFFFFFFF) {
53 /* No FCP Priority config data in flash */
54 DEBUG2(printk(KERN_INFO
55 "%s: No FCP priority config data.\n",
56 __func__));
57 return 0;
60 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
61 bcode[3] != 'S') {
62 /* Invalid FCP priority data header*/
63 DEBUG2(printk(KERN_ERR
64 "%s: Invalid FCP Priority data header. bcode=0x%x\n",
65 __func__, bcode_val));
66 return 0;
68 if (flag != 1)
69 return ret;
71 pri_entry = &pri_cfg->entry[0];
72 for (i = 0; i < pri_cfg->num_entries; i++) {
73 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
74 num_valid++;
75 pri_entry++;
78 if (num_valid == 0) {
79 /* No valid FCP priority data entries */
80 DEBUG2(printk(KERN_ERR
81 "%s: No valid FCP Priority data entries.\n",
82 __func__));
83 ret = 0;
84 } else {
85 /* FCP priority data is valid */
86 DEBUG2(printk(KERN_INFO
87 "%s: Valid FCP priority data. num entries = %d\n",
88 __func__, num_valid));
91 return ret;
94 static int
95 qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
97 struct Scsi_Host *host = bsg_job->shost;
98 scsi_qla_host_t *vha = shost_priv(host);
99 struct qla_hw_data *ha = vha->hw;
100 int ret = 0;
101 uint32_t len;
102 uint32_t oper;
104 bsg_job->reply->reply_payload_rcv_len = 0;
106 if (!IS_QLA24XX_TYPE(ha) || !IS_QLA25XX(ha)) {
107 ret = -EINVAL;
108 goto exit_fcp_prio_cfg;
111 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
112 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
113 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
114 ret = -EBUSY;
115 goto exit_fcp_prio_cfg;
118 /* Get the sub command */
119 oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
121 /* Only set config is allowed if config memory is not allocated */
122 if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
123 ret = -EINVAL;
124 goto exit_fcp_prio_cfg;
126 switch (oper) {
127 case QLFC_FCP_PRIO_DISABLE:
128 if (ha->flags.fcp_prio_enabled) {
129 ha->flags.fcp_prio_enabled = 0;
130 ha->fcp_prio_cfg->attributes &=
131 ~FCP_PRIO_ATTR_ENABLE;
132 qla24xx_update_all_fcp_prio(vha);
133 bsg_job->reply->result = DID_OK;
134 } else {
135 ret = -EINVAL;
136 bsg_job->reply->result = (DID_ERROR << 16);
137 goto exit_fcp_prio_cfg;
139 break;
141 case QLFC_FCP_PRIO_ENABLE:
142 if (!ha->flags.fcp_prio_enabled) {
143 if (ha->fcp_prio_cfg) {
144 ha->flags.fcp_prio_enabled = 1;
145 ha->fcp_prio_cfg->attributes |=
146 FCP_PRIO_ATTR_ENABLE;
147 qla24xx_update_all_fcp_prio(vha);
148 bsg_job->reply->result = DID_OK;
149 } else {
150 ret = -EINVAL;
151 bsg_job->reply->result = (DID_ERROR << 16);
152 goto exit_fcp_prio_cfg;
155 break;
157 case QLFC_FCP_PRIO_GET_CONFIG:
158 len = bsg_job->reply_payload.payload_len;
159 if (!len || len > FCP_PRIO_CFG_SIZE) {
160 ret = -EINVAL;
161 bsg_job->reply->result = (DID_ERROR << 16);
162 goto exit_fcp_prio_cfg;
165 bsg_job->reply->result = DID_OK;
166 bsg_job->reply->reply_payload_rcv_len =
167 sg_copy_from_buffer(
168 bsg_job->reply_payload.sg_list,
169 bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
170 len);
172 break;
174 case QLFC_FCP_PRIO_SET_CONFIG:
175 len = bsg_job->request_payload.payload_len;
176 if (!len || len > FCP_PRIO_CFG_SIZE) {
177 bsg_job->reply->result = (DID_ERROR << 16);
178 ret = -EINVAL;
179 goto exit_fcp_prio_cfg;
182 if (!ha->fcp_prio_cfg) {
183 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
184 if (!ha->fcp_prio_cfg) {
185 qla_printk(KERN_WARNING, ha,
186 "Unable to allocate memory "
187 "for fcp prio config data (%x).\n",
188 FCP_PRIO_CFG_SIZE);
189 bsg_job->reply->result = (DID_ERROR << 16);
190 ret = -ENOMEM;
191 goto exit_fcp_prio_cfg;
195 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
196 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
197 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
198 FCP_PRIO_CFG_SIZE);
200 /* validate fcp priority data */
201 if (!qla24xx_fcp_prio_cfg_valid(
202 (struct qla_fcp_prio_cfg *)
203 ha->fcp_prio_cfg, 1)) {
204 bsg_job->reply->result = (DID_ERROR << 16);
205 ret = -EINVAL;
206 /* If buffer was invalidatic int
207 * fcp_prio_cfg is of no use
209 vfree(ha->fcp_prio_cfg);
210 ha->fcp_prio_cfg = NULL;
211 goto exit_fcp_prio_cfg;
214 ha->flags.fcp_prio_enabled = 0;
215 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
216 ha->flags.fcp_prio_enabled = 1;
217 qla24xx_update_all_fcp_prio(vha);
218 bsg_job->reply->result = DID_OK;
219 break;
220 default:
221 ret = -EINVAL;
222 break;
224 exit_fcp_prio_cfg:
225 bsg_job->job_done(bsg_job);
226 return ret;
228 static int
229 qla2x00_process_els(struct fc_bsg_job *bsg_job)
231 struct fc_rport *rport;
232 fc_port_t *fcport = NULL;
233 struct Scsi_Host *host;
234 scsi_qla_host_t *vha;
235 struct qla_hw_data *ha;
236 srb_t *sp;
237 const char *type;
238 int req_sg_cnt, rsp_sg_cnt;
239 int rval = (DRIVER_ERROR << 16);
240 uint16_t nextlid = 0;
241 struct srb_ctx *els;
243 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
244 rport = bsg_job->rport;
245 fcport = *(fc_port_t **) rport->dd_data;
246 host = rport_to_shost(rport);
247 vha = shost_priv(host);
248 ha = vha->hw;
249 type = "FC_BSG_RPT_ELS";
250 } else {
251 host = bsg_job->shost;
252 vha = shost_priv(host);
253 ha = vha->hw;
254 type = "FC_BSG_HST_ELS_NOLOGIN";
257 /* pass through is supported only for ISP 4Gb or higher */
258 if (!IS_FWI2_CAPABLE(ha)) {
259 DEBUG2(qla_printk(KERN_INFO, ha,
260 "scsi(%ld):ELS passthru not supported for ISP23xx based "
261 "adapters\n", vha->host_no));
262 rval = -EPERM;
263 goto done;
266 /* Multiple SG's are not supported for ELS requests */
267 if (bsg_job->request_payload.sg_cnt > 1 ||
268 bsg_job->reply_payload.sg_cnt > 1) {
269 DEBUG2(printk(KERN_INFO
270 "multiple SG's are not supported for ELS requests"
271 " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
272 bsg_job->request_payload.sg_cnt,
273 bsg_job->reply_payload.sg_cnt));
274 rval = -EPERM;
275 goto done;
278 /* ELS request for rport */
279 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
280 /* make sure the rport is logged in,
281 * if not perform fabric login
283 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
284 DEBUG2(qla_printk(KERN_WARNING, ha,
285 "failed to login port %06X for ELS passthru\n",
286 fcport->d_id.b24));
287 rval = -EIO;
288 goto done;
290 } else {
291 /* Allocate a dummy fcport structure, since functions
292 * preparing the IOCB and mailbox command retrieves port
293 * specific information from fcport structure. For Host based
294 * ELS commands there will be no fcport structure allocated
296 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
297 if (!fcport) {
298 rval = -ENOMEM;
299 goto done;
302 /* Initialize all required fields of fcport */
303 fcport->vha = vha;
304 fcport->vp_idx = vha->vp_idx;
305 fcport->d_id.b.al_pa =
306 bsg_job->request->rqst_data.h_els.port_id[0];
307 fcport->d_id.b.area =
308 bsg_job->request->rqst_data.h_els.port_id[1];
309 fcport->d_id.b.domain =
310 bsg_job->request->rqst_data.h_els.port_id[2];
311 fcport->loop_id =
312 (fcport->d_id.b.al_pa == 0xFD) ?
313 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
316 if (!vha->flags.online) {
317 DEBUG2(qla_printk(KERN_WARNING, ha,
318 "host not online\n"));
319 rval = -EIO;
320 goto done;
323 req_sg_cnt =
324 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
325 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
326 if (!req_sg_cnt) {
327 rval = -ENOMEM;
328 goto done_free_fcport;
331 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
332 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
333 if (!rsp_sg_cnt) {
334 rval = -ENOMEM;
335 goto done_free_fcport;
338 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
339 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
340 DEBUG2(printk(KERN_INFO
341 "dma mapping resulted in different sg counts \
342 [request_sg_cnt: %x dma_request_sg_cnt: %x\
343 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
344 bsg_job->request_payload.sg_cnt, req_sg_cnt,
345 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
346 rval = -EAGAIN;
347 goto done_unmap_sg;
350 /* Alloc SRB structure */
351 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
352 if (!sp) {
353 rval = -ENOMEM;
354 goto done_unmap_sg;
357 els = sp->ctx;
358 els->type =
359 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
360 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
361 els->name =
362 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
363 "bsg_els_rpt" : "bsg_els_hst");
364 els->u.bsg_job = bsg_job;
366 DEBUG2(qla_printk(KERN_INFO, ha,
367 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
368 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
369 bsg_job->request->rqst_data.h_els.command_code,
370 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
371 fcport->d_id.b.al_pa));
373 rval = qla2x00_start_sp(sp);
374 if (rval != QLA_SUCCESS) {
375 kfree(sp->ctx);
376 mempool_free(sp, ha->srb_mempool);
377 rval = -EIO;
378 goto done_unmap_sg;
380 return rval;
382 done_unmap_sg:
383 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
384 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
385 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
386 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
387 goto done_free_fcport;
389 done_free_fcport:
390 if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
391 kfree(fcport);
392 done:
393 return rval;
396 static int
397 qla2x00_process_ct(struct fc_bsg_job *bsg_job)
399 srb_t *sp;
400 struct Scsi_Host *host = bsg_job->shost;
401 scsi_qla_host_t *vha = shost_priv(host);
402 struct qla_hw_data *ha = vha->hw;
403 int rval = (DRIVER_ERROR << 16);
404 int req_sg_cnt, rsp_sg_cnt;
405 uint16_t loop_id;
406 struct fc_port *fcport;
407 char *type = "FC_BSG_HST_CT";
408 struct srb_ctx *ct;
410 req_sg_cnt =
411 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
412 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
413 if (!req_sg_cnt) {
414 rval = -ENOMEM;
415 goto done;
418 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
419 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
420 if (!rsp_sg_cnt) {
421 rval = -ENOMEM;
422 goto done;
425 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
426 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
427 DEBUG2(qla_printk(KERN_WARNING, ha,
428 "[request_sg_cnt: %x dma_request_sg_cnt: %x\
429 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
430 bsg_job->request_payload.sg_cnt, req_sg_cnt,
431 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
432 rval = -EAGAIN;
433 goto done_unmap_sg;
436 if (!vha->flags.online) {
437 DEBUG2(qla_printk(KERN_WARNING, ha,
438 "host not online\n"));
439 rval = -EIO;
440 goto done_unmap_sg;
443 loop_id =
444 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
445 >> 24;
446 switch (loop_id) {
447 case 0xFC:
448 loop_id = cpu_to_le16(NPH_SNS);
449 break;
450 case 0xFA:
451 loop_id = vha->mgmt_svr_loop_id;
452 break;
453 default:
454 DEBUG2(qla_printk(KERN_INFO, ha,
455 "Unknown loop id: %x\n", loop_id));
456 rval = -EINVAL;
457 goto done_unmap_sg;
460 /* Allocate a dummy fcport structure, since functions preparing the
461 * IOCB and mailbox command retrieves port specific information
462 * from fcport structure. For Host based ELS commands there will be
463 * no fcport structure allocated
465 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
466 if (!fcport) {
467 rval = -ENOMEM;
468 goto done_unmap_sg;
471 /* Initialize all required fields of fcport */
472 fcport->vha = vha;
473 fcport->vp_idx = vha->vp_idx;
474 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
475 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
476 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
477 fcport->loop_id = loop_id;
479 /* Alloc SRB structure */
480 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
481 if (!sp) {
482 rval = -ENOMEM;
483 goto done_free_fcport;
486 ct = sp->ctx;
487 ct->type = SRB_CT_CMD;
488 ct->name = "bsg_ct";
489 ct->u.bsg_job = bsg_job;
491 DEBUG2(qla_printk(KERN_INFO, ha,
492 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
493 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
494 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
495 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
496 fcport->d_id.b.al_pa));
498 rval = qla2x00_start_sp(sp);
499 if (rval != QLA_SUCCESS) {
500 kfree(sp->ctx);
501 mempool_free(sp, ha->srb_mempool);
502 rval = -EIO;
503 goto done_free_fcport;
505 return rval;
507 done_free_fcport:
508 kfree(fcport);
509 done_unmap_sg:
510 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
511 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
512 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
513 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
514 done:
515 return rval;
518 /* Set the port configuration to enable the
519 * internal loopback on ISP81XX
521 static inline int
522 qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
523 uint16_t *new_config)
525 int ret = 0;
526 int rval = 0;
527 struct qla_hw_data *ha = vha->hw;
529 if (!IS_QLA81XX(ha))
530 goto done_set_internal;
532 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
533 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
535 ha->notify_dcbx_comp = 1;
536 ret = qla81xx_set_port_config(vha, new_config);
537 if (ret != QLA_SUCCESS) {
538 DEBUG2(printk(KERN_ERR
539 "%s(%lu): Set port config failed\n",
540 __func__, vha->host_no));
541 ha->notify_dcbx_comp = 0;
542 rval = -EINVAL;
543 goto done_set_internal;
546 /* Wait for DCBX complete event */
547 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
548 DEBUG2(qla_printk(KERN_WARNING, ha,
549 "State change notificaition not received.\n"));
550 } else
551 DEBUG2(qla_printk(KERN_INFO, ha,
552 "State change RECEIVED\n"));
554 ha->notify_dcbx_comp = 0;
556 done_set_internal:
557 return rval;
560 /* Set the port configuration to disable the
561 * internal loopback on ISP81XX
563 static inline int
564 qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
565 int wait)
567 int ret = 0;
568 int rval = 0;
569 uint16_t new_config[4];
570 struct qla_hw_data *ha = vha->hw;
572 if (!IS_QLA81XX(ha))
573 goto done_reset_internal;
575 memset(new_config, 0 , sizeof(new_config));
576 if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
577 ENABLE_INTERNAL_LOOPBACK) {
578 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
579 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
581 ha->notify_dcbx_comp = wait;
582 ret = qla81xx_set_port_config(vha, new_config);
583 if (ret != QLA_SUCCESS) {
584 DEBUG2(printk(KERN_ERR
585 "%s(%lu): Set port config failed\n",
586 __func__, vha->host_no));
587 ha->notify_dcbx_comp = 0;
588 rval = -EINVAL;
589 goto done_reset_internal;
592 /* Wait for DCBX complete event */
593 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
594 (20 * HZ))) {
595 DEBUG2(qla_printk(KERN_WARNING, ha,
596 "State change notificaition not received.\n"));
597 ha->notify_dcbx_comp = 0;
598 rval = -EINVAL;
599 goto done_reset_internal;
600 } else
601 DEBUG2(qla_printk(KERN_INFO, ha,
602 "State change RECEIVED\n"));
604 ha->notify_dcbx_comp = 0;
606 done_reset_internal:
607 return rval;
610 static int
611 qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
613 struct Scsi_Host *host = bsg_job->shost;
614 scsi_qla_host_t *vha = shost_priv(host);
615 struct qla_hw_data *ha = vha->hw;
616 int rval;
617 uint8_t command_sent;
618 char *type;
619 struct msg_echo_lb elreq;
620 uint16_t response[MAILBOX_REGISTER_COUNT];
621 uint16_t config[4], new_config[4];
622 uint8_t *fw_sts_ptr;
623 uint8_t *req_data = NULL;
624 dma_addr_t req_data_dma;
625 uint32_t req_data_len;
626 uint8_t *rsp_data = NULL;
627 dma_addr_t rsp_data_dma;
628 uint32_t rsp_data_len;
630 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
631 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
632 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
633 return -EBUSY;
635 if (!vha->flags.online) {
636 DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n"));
637 return -EIO;
640 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
641 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
642 DMA_TO_DEVICE);
644 if (!elreq.req_sg_cnt)
645 return -ENOMEM;
647 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
648 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
649 DMA_FROM_DEVICE);
651 if (!elreq.rsp_sg_cnt) {
652 rval = -ENOMEM;
653 goto done_unmap_req_sg;
656 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
657 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
658 DEBUG2(printk(KERN_INFO
659 "dma mapping resulted in different sg counts "
660 "[request_sg_cnt: %x dma_request_sg_cnt: %x "
661 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
662 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
663 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
664 rval = -EAGAIN;
665 goto done_unmap_sg;
667 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
668 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
669 &req_data_dma, GFP_KERNEL);
670 if (!req_data) {
671 DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data "
672 "failed for host=%lu\n", __func__, vha->host_no));
673 rval = -ENOMEM;
674 goto done_unmap_sg;
677 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
678 &rsp_data_dma, GFP_KERNEL);
679 if (!rsp_data) {
680 DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data "
681 "failed for host=%lu\n", __func__, vha->host_no));
682 rval = -ENOMEM;
683 goto done_free_dma_req;
686 /* Copy the request buffer in req_data now */
687 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
688 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
690 elreq.send_dma = req_data_dma;
691 elreq.rcv_dma = rsp_data_dma;
692 elreq.transfer_size = req_data_len;
694 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
696 if ((ha->current_topology == ISP_CFG_F ||
697 (IS_QLA81XX(ha) &&
698 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
699 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
700 elreq.options == EXTERNAL_LOOPBACK) {
701 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
702 DEBUG2(qla_printk(KERN_INFO, ha,
703 "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type));
704 command_sent = INT_DEF_LB_ECHO_CMD;
705 rval = qla2x00_echo_test(vha, &elreq, response);
706 } else {
707 if (IS_QLA81XX(ha)) {
708 memset(config, 0, sizeof(config));
709 memset(new_config, 0, sizeof(new_config));
710 if (qla81xx_get_port_config(vha, config)) {
711 DEBUG2(printk(KERN_ERR
712 "%s(%lu): Get port config failed\n",
713 __func__, vha->host_no));
714 bsg_job->reply->reply_payload_rcv_len = 0;
715 bsg_job->reply->result = (DID_ERROR << 16);
716 rval = -EPERM;
717 goto done_free_dma_req;
720 if (elreq.options != EXTERNAL_LOOPBACK) {
721 DEBUG2(qla_printk(KERN_INFO, ha,
722 "Internal: current port config = %x\n",
723 config[0]));
724 if (qla81xx_set_internal_loopback(vha, config,
725 new_config)) {
726 bsg_job->reply->reply_payload_rcv_len =
728 bsg_job->reply->result =
729 (DID_ERROR << 16);
730 rval = -EPERM;
731 goto done_free_dma_req;
733 } else {
734 /* For external loopback to work
735 * ensure internal loopback is disabled
737 if (qla81xx_reset_internal_loopback(vha,
738 config, 1)) {
739 bsg_job->reply->reply_payload_rcv_len =
741 bsg_job->reply->result =
742 (DID_ERROR << 16);
743 rval = -EPERM;
744 goto done_free_dma_req;
748 type = "FC_BSG_HST_VENDOR_LOOPBACK";
749 DEBUG2(qla_printk(KERN_INFO, ha,
750 "scsi(%ld) bsg rqst type: %s\n",
751 vha->host_no, type));
753 command_sent = INT_DEF_LB_LOOPBACK_CMD;
754 rval = qla2x00_loopback_test(vha, &elreq, response);
756 if (new_config[1]) {
757 /* Revert back to original port config
758 * Also clear internal loopback
760 qla81xx_reset_internal_loopback(vha,
761 new_config, 0);
764 if (response[0] == MBS_COMMAND_ERROR &&
765 response[1] == MBS_LB_RESET) {
766 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
767 "ISP\n", __func__, vha->host_no));
768 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
769 qla2xxx_wake_dpc(vha);
770 qla2x00_wait_for_chip_reset(vha);
771 /* Also reset the MPI */
772 if (qla81xx_restart_mpi_firmware(vha) !=
773 QLA_SUCCESS) {
774 qla_printk(KERN_INFO, ha,
775 "MPI reset failed for host%ld.\n",
776 vha->host_no);
779 bsg_job->reply->reply_payload_rcv_len = 0;
780 bsg_job->reply->result = (DID_ERROR << 16);
781 rval = -EIO;
782 goto done_free_dma_req;
784 } else {
785 type = "FC_BSG_HST_VENDOR_LOOPBACK";
786 DEBUG2(qla_printk(KERN_INFO, ha,
787 "scsi(%ld) bsg rqst type: %s\n",
788 vha->host_no, type));
789 command_sent = INT_DEF_LB_LOOPBACK_CMD;
790 rval = qla2x00_loopback_test(vha, &elreq, response);
794 if (rval) {
795 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
796 "request %s failed\n", vha->host_no, type));
798 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
799 sizeof(struct fc_bsg_reply);
801 memcpy(fw_sts_ptr, response, sizeof(response));
802 fw_sts_ptr += sizeof(response);
803 *fw_sts_ptr = command_sent;
804 rval = 0;
805 bsg_job->reply->reply_payload_rcv_len = 0;
806 bsg_job->reply->result = (DID_ERROR << 16);
807 } else {
808 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
809 "request %s completed\n", vha->host_no, type));
811 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
812 sizeof(response) + sizeof(uint8_t);
813 bsg_job->reply->reply_payload_rcv_len =
814 bsg_job->reply_payload.payload_len;
815 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
816 sizeof(struct fc_bsg_reply);
817 memcpy(fw_sts_ptr, response, sizeof(response));
818 fw_sts_ptr += sizeof(response);
819 *fw_sts_ptr = command_sent;
820 bsg_job->reply->result = DID_OK;
821 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
822 bsg_job->reply_payload.sg_cnt, rsp_data,
823 rsp_data_len);
825 bsg_job->job_done(bsg_job);
827 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
828 rsp_data, rsp_data_dma);
829 done_free_dma_req:
830 dma_free_coherent(&ha->pdev->dev, req_data_len,
831 req_data, req_data_dma);
832 done_unmap_sg:
833 dma_unmap_sg(&ha->pdev->dev,
834 bsg_job->reply_payload.sg_list,
835 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
836 done_unmap_req_sg:
837 dma_unmap_sg(&ha->pdev->dev,
838 bsg_job->request_payload.sg_list,
839 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
840 return rval;
843 static int
844 qla84xx_reset(struct fc_bsg_job *bsg_job)
846 struct Scsi_Host *host = bsg_job->shost;
847 scsi_qla_host_t *vha = shost_priv(host);
848 struct qla_hw_data *ha = vha->hw;
849 int rval = 0;
850 uint32_t flag;
852 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
853 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
854 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
855 return -EBUSY;
857 if (!IS_QLA84XX(ha)) {
858 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
859 "exiting.\n", vha->host_no));
860 return -EINVAL;
863 flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
865 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
867 if (rval) {
868 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
869 "request 84xx reset failed\n", vha->host_no));
870 rval = bsg_job->reply->reply_payload_rcv_len = 0;
871 bsg_job->reply->result = (DID_ERROR << 16);
873 } else {
874 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
875 "request 84xx reset completed\n", vha->host_no));
876 bsg_job->reply->result = DID_OK;
879 bsg_job->job_done(bsg_job);
880 return rval;
883 static int
884 qla84xx_updatefw(struct fc_bsg_job *bsg_job)
886 struct Scsi_Host *host = bsg_job->shost;
887 scsi_qla_host_t *vha = shost_priv(host);
888 struct qla_hw_data *ha = vha->hw;
889 struct verify_chip_entry_84xx *mn = NULL;
890 dma_addr_t mn_dma, fw_dma;
891 void *fw_buf = NULL;
892 int rval = 0;
893 uint32_t sg_cnt;
894 uint32_t data_len;
895 uint16_t options;
896 uint32_t flag;
897 uint32_t fw_ver;
899 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
900 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
901 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
902 return -EBUSY;
904 if (!IS_QLA84XX(ha)) {
905 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
906 "exiting.\n", vha->host_no));
907 return -EINVAL;
910 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
911 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
912 if (!sg_cnt)
913 return -ENOMEM;
915 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
916 DEBUG2(printk(KERN_INFO
917 "dma mapping resulted in different sg counts "
918 "request_sg_cnt: %x dma_request_sg_cnt: %x ",
919 bsg_job->request_payload.sg_cnt, sg_cnt));
920 rval = -EAGAIN;
921 goto done_unmap_sg;
924 data_len = bsg_job->request_payload.payload_len;
925 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
926 &fw_dma, GFP_KERNEL);
927 if (!fw_buf) {
928 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf "
929 "failed for host=%lu\n", __func__, vha->host_no));
930 rval = -ENOMEM;
931 goto done_unmap_sg;
934 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
935 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
937 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
938 if (!mn) {
939 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
940 "failed for host=%lu\n", __func__, vha->host_no));
941 rval = -ENOMEM;
942 goto done_free_fw_buf;
945 flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
946 fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
948 memset(mn, 0, sizeof(struct access_chip_84xx));
949 mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
950 mn->entry_count = 1;
952 options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
953 if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
954 options |= VCO_DIAG_FW;
956 mn->options = cpu_to_le16(options);
957 mn->fw_ver = cpu_to_le32(fw_ver);
958 mn->fw_size = cpu_to_le32(data_len);
959 mn->fw_seq_size = cpu_to_le32(data_len);
960 mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
961 mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
962 mn->dseg_length = cpu_to_le32(data_len);
963 mn->data_seg_cnt = cpu_to_le16(1);
965 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
967 if (rval) {
968 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
969 "request 84xx updatefw failed\n", vha->host_no));
971 rval = bsg_job->reply->reply_payload_rcv_len = 0;
972 bsg_job->reply->result = (DID_ERROR << 16);
974 } else {
975 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
976 "request 84xx updatefw completed\n", vha->host_no));
978 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
979 bsg_job->reply->result = DID_OK;
982 bsg_job->job_done(bsg_job);
983 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
985 done_free_fw_buf:
986 dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
988 done_unmap_sg:
989 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
990 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
992 return rval;
995 static int
996 qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
998 struct Scsi_Host *host = bsg_job->shost;
999 scsi_qla_host_t *vha = shost_priv(host);
1000 struct qla_hw_data *ha = vha->hw;
1001 struct access_chip_84xx *mn = NULL;
1002 dma_addr_t mn_dma, mgmt_dma;
1003 void *mgmt_b = NULL;
1004 int rval = 0;
1005 struct qla_bsg_a84_mgmt *ql84_mgmt;
1006 uint32_t sg_cnt;
1007 uint32_t data_len = 0;
1008 uint32_t dma_direction = DMA_NONE;
1010 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1011 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1012 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
1013 return -EBUSY;
1015 if (!IS_QLA84XX(ha)) {
1016 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
1017 "exiting.\n", vha->host_no));
1018 return -EINVAL;
1021 ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
1022 sizeof(struct fc_bsg_request));
1023 if (!ql84_mgmt) {
1024 DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
1025 __func__, vha->host_no));
1026 return -EINVAL;
1029 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1030 if (!mn) {
1031 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
1032 "failed for host=%lu\n", __func__, vha->host_no));
1033 return -ENOMEM;
1036 memset(mn, 0, sizeof(struct access_chip_84xx));
1037 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1038 mn->entry_count = 1;
1040 switch (ql84_mgmt->mgmt.cmd) {
1041 case QLA84_MGMT_READ_MEM:
1042 case QLA84_MGMT_GET_INFO:
1043 sg_cnt = dma_map_sg(&ha->pdev->dev,
1044 bsg_job->reply_payload.sg_list,
1045 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1046 if (!sg_cnt) {
1047 rval = -ENOMEM;
1048 goto exit_mgmt;
1051 dma_direction = DMA_FROM_DEVICE;
1053 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1054 DEBUG2(printk(KERN_INFO
1055 "dma mapping resulted in different sg counts "
1056 "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
1057 bsg_job->reply_payload.sg_cnt, sg_cnt));
1058 rval = -EAGAIN;
1059 goto done_unmap_sg;
1062 data_len = bsg_job->reply_payload.payload_len;
1064 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1065 &mgmt_dma, GFP_KERNEL);
1066 if (!mgmt_b) {
1067 DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
1068 "failed for host=%lu\n",
1069 __func__, vha->host_no));
1070 rval = -ENOMEM;
1071 goto done_unmap_sg;
1074 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1075 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1076 mn->parameter1 =
1077 cpu_to_le32(
1078 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1080 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1081 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1082 mn->parameter1 =
1083 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1085 mn->parameter2 =
1086 cpu_to_le32(
1087 ql84_mgmt->mgmt.mgmtp.u.info.context);
1089 break;
1091 case QLA84_MGMT_WRITE_MEM:
1092 sg_cnt = dma_map_sg(&ha->pdev->dev,
1093 bsg_job->request_payload.sg_list,
1094 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1096 if (!sg_cnt) {
1097 rval = -ENOMEM;
1098 goto exit_mgmt;
1101 dma_direction = DMA_TO_DEVICE;
1103 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1104 DEBUG2(printk(KERN_INFO
1105 "dma mapping resulted in different sg counts "
1106 "request_sg_cnt: %x dma_request_sg_cnt: %x ",
1107 bsg_job->request_payload.sg_cnt, sg_cnt));
1108 rval = -EAGAIN;
1109 goto done_unmap_sg;
1112 data_len = bsg_job->request_payload.payload_len;
1113 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1114 &mgmt_dma, GFP_KERNEL);
1115 if (!mgmt_b) {
1116 DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
1117 "failed for host=%lu\n",
1118 __func__, vha->host_no));
1119 rval = -ENOMEM;
1120 goto done_unmap_sg;
1123 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1124 bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1126 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1127 mn->parameter1 =
1128 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1129 break;
1131 case QLA84_MGMT_CHNG_CONFIG:
1132 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1133 mn->parameter1 =
1134 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1136 mn->parameter2 =
1137 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1139 mn->parameter3 =
1140 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
1141 break;
1143 default:
1144 rval = -EIO;
1145 goto exit_mgmt;
1148 if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1149 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1150 mn->dseg_count = cpu_to_le16(1);
1151 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
1152 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
1153 mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
1156 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1158 if (rval) {
1159 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
1160 "request 84xx mgmt failed\n", vha->host_no));
1162 rval = bsg_job->reply->reply_payload_rcv_len = 0;
1163 bsg_job->reply->result = (DID_ERROR << 16);
1165 } else {
1166 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
1167 "request 84xx mgmt completed\n", vha->host_no));
1169 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1170 bsg_job->reply->result = DID_OK;
1172 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1173 (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1174 bsg_job->reply->reply_payload_rcv_len =
1175 bsg_job->reply_payload.payload_len;
1177 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1178 bsg_job->reply_payload.sg_cnt, mgmt_b,
1179 data_len);
1183 bsg_job->job_done(bsg_job);
1185 done_unmap_sg:
1186 if (mgmt_b)
1187 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1189 if (dma_direction == DMA_TO_DEVICE)
1190 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1191 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1192 else if (dma_direction == DMA_FROM_DEVICE)
1193 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1194 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1196 exit_mgmt:
1197 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1199 return rval;
1202 static int
1203 qla24xx_iidma(struct fc_bsg_job *bsg_job)
1205 struct Scsi_Host *host = bsg_job->shost;
1206 scsi_qla_host_t *vha = shost_priv(host);
1207 struct qla_hw_data *ha = vha->hw;
1208 int rval = 0;
1209 struct qla_port_param *port_param = NULL;
1210 fc_port_t *fcport = NULL;
1211 uint16_t mb[MAILBOX_REGISTER_COUNT];
1212 uint8_t *rsp_ptr = NULL;
1214 bsg_job->reply->reply_payload_rcv_len = 0;
1216 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1217 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1218 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
1219 return -EBUSY;
1221 if (!IS_IIDMA_CAPABLE(vha->hw)) {
1222 DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not "
1223 "supported\n", __func__, vha->host_no));
1224 return -EINVAL;
1227 port_param = (struct qla_port_param *)((char *)bsg_job->request +
1228 sizeof(struct fc_bsg_request));
1229 if (!port_param) {
1230 DEBUG2(printk("%s(%ld): port_param header not provided, "
1231 "exiting.\n", __func__, vha->host_no));
1232 return -EINVAL;
1235 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1236 DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n",
1237 __func__, vha->host_no));
1238 return -EINVAL;
1241 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1242 if (fcport->port_type != FCT_TARGET)
1243 continue;
1245 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1246 fcport->port_name, sizeof(fcport->port_name)))
1247 continue;
1248 break;
1251 if (!fcport) {
1252 DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n",
1253 __func__, vha->host_no));
1254 return -EINVAL;
1257 if (fcport->loop_id == FC_NO_LOOP_ID) {
1258 DEBUG2(printk(KERN_ERR "%s(%ld): Invalid port loop id, "
1259 "loop_id = 0x%x\n",
1260 __func__, vha->host_no, fcport->loop_id));
1261 return -EINVAL;
1264 if (fcport->flags & FCF_LOGIN_NEEDED) {
1265 DEBUG2(printk(KERN_ERR "%s(%ld): Remote port not logged in, "
1266 "flags = 0x%x\n",
1267 __func__, vha->host_no, fcport->flags));
1268 return -EINVAL;
1271 if (port_param->mode)
1272 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1273 port_param->speed, mb);
1274 else
1275 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1276 &port_param->speed, mb);
1278 if (rval) {
1279 DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for "
1280 "%02x%02x%02x%02x%02x%02x%02x%02x -- "
1281 "%04x %x %04x %04x.\n",
1282 vha->host_no, fcport->port_name[0],
1283 fcport->port_name[1],
1284 fcport->port_name[2], fcport->port_name[3],
1285 fcport->port_name[4], fcport->port_name[5],
1286 fcport->port_name[6], fcport->port_name[7], rval,
1287 fcport->fp_speed, mb[0], mb[1]));
1288 rval = 0;
1289 bsg_job->reply->result = (DID_ERROR << 16);
1291 } else {
1292 if (!port_param->mode) {
1293 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1294 sizeof(struct qla_port_param);
1296 rsp_ptr = ((uint8_t *)bsg_job->reply) +
1297 sizeof(struct fc_bsg_reply);
1299 memcpy(rsp_ptr, port_param,
1300 sizeof(struct qla_port_param));
1303 bsg_job->reply->result = DID_OK;
1306 bsg_job->job_done(bsg_job);
1307 return rval;
1310 static int
1311 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1313 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
1314 case QL_VND_LOOPBACK:
1315 return qla2x00_process_loopback(bsg_job);
1317 case QL_VND_A84_RESET:
1318 return qla84xx_reset(bsg_job);
1320 case QL_VND_A84_UPDATE_FW:
1321 return qla84xx_updatefw(bsg_job);
1323 case QL_VND_A84_MGMT_CMD:
1324 return qla84xx_mgmt_cmd(bsg_job);
1326 case QL_VND_IIDMA:
1327 return qla24xx_iidma(bsg_job);
1329 case QL_VND_FCP_PRIO_CFG_CMD:
1330 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
1332 default:
1333 bsg_job->reply->result = (DID_ERROR << 16);
1334 bsg_job->job_done(bsg_job);
1335 return -ENOSYS;
1340 qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1342 int ret = -EINVAL;
1344 switch (bsg_job->request->msgcode) {
1345 case FC_BSG_RPT_ELS:
1346 case FC_BSG_HST_ELS_NOLOGIN:
1347 ret = qla2x00_process_els(bsg_job);
1348 break;
1349 case FC_BSG_HST_CT:
1350 ret = qla2x00_process_ct(bsg_job);
1351 break;
1352 case FC_BSG_HST_VENDOR:
1353 ret = qla2x00_process_vendor_specific(bsg_job);
1354 break;
1355 case FC_BSG_HST_ADD_RPORT:
1356 case FC_BSG_HST_DEL_RPORT:
1357 case FC_BSG_RPT_CT:
1358 default:
1359 DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
1360 break;
1362 return ret;
/*
 * qla24xx_bsg_timeout() - FC transport BSG timeout handler.
 *
 * Scans every request queue's outstanding-command array for the SRB that
 * carries this bsg_job, asks the ISP to abort it, and releases the SRB's
 * resources.  bsg_job->req->errors / bsg_job->reply->result are set to
 * -EIO (abort mailbox failed), 0 (abort issued) or -ENXIO (SRB not found).
 * Always returns 0.
 */
int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;
	struct srb_ctx *sp_bsg;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		/* Handle 0 is not used for commands, so start at 1. */
		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				sp_bsg = sp->ctx;

				/* Only CT / host-ELS SRBs carry a bsg_job. */
				if (((sp_bsg->type == SRB_CT_CMD) ||
					(sp_bsg->type == SRB_ELS_CMD_HST))
					&& (sp_bsg->u.bsg_job == bsg_job)) {
					if (ha->isp_ops->abort_command(sp)) {
						DEBUG2(qla_printk(KERN_INFO, ha,
						    "scsi(%ld): mbx "
						    "abort_command failed\n",
						    vha->host_no));
						bsg_job->req->errors =
						bsg_job->reply->result = -EIO;
					} else {
						DEBUG2(qla_printk(KERN_INFO, ha,
						    "scsi(%ld): mbx "
						    "abort_command success\n",
						    vha->host_no));
						bsg_job->req->errors =
						bsg_job->reply->result = 0;
					}
					/* Still holding hardware_lock here. */
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	DEBUG2(qla_printk(KERN_INFO, ha,
	    "scsi(%ld) SRB not found to abort\n", vha->host_no));
	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/*
	 * Release the SRB's resources here rather than via the normal
	 * completion path.  NOTE(review): fcport is freed only for CT jobs
	 * — presumably CT allocates a temporary fcport while ELS uses a
	 * real port; confirm against the submission paths.
	 */
	if (bsg_job->request->msgcode == FC_BSG_HST_CT)
		kfree(sp->fcport);
	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	return 0;
}