drivers/scsi/qla2xxx/qla_mbx.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
9 #include <linux/delay.h>
10 #include <linux/gfp.h>
14 * qla2x00_mailbox_command
15 *	Issue a mailbox command and wait for completion.
17 * Input:
18 * ha = adapter block pointer.
19 * mcp = driver internal mbx struct pointer.
21 * Output:
22 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
24 * Returns:
25 *	0 : QLA_SUCCESS (command completed successfully)
26 * 1 : QLA_FUNCTION_FAILED (error encountered)
27 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
29 * Context:
30 * Kernel context.
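 *
 * Illustrative caller sketch (a minimal example mirroring the pattern of
 * the wrapper routines below, e.g. qla2x00_get_fw_options(); only names
 * defined by this driver are used):
 *
 *	mbx_cmd_t mc;
 *	mbx_cmd_t *mcp = &mc;
 *
 *	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;	  command opcode in mb[0]
 *	mcp->out_mb = MBX_0;			  bitmap of registers to send
 *	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;	  bitmap of registers returned
 *	mcp->tov = MBX_TOV_SECONDS;		  per-command timeout (seconds)
 *	mcp->flags = 0;
 *	rval = qla2x00_mailbox_command(vha, mcp);  results land in mcp->mb[]
 */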
32 static int
33 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
35 int rval;
36 unsigned long flags = 0;
37 device_reg_t __iomem *reg;
38 uint8_t abort_active;
39 uint8_t io_lock_on;
40 uint16_t command = 0;
41 uint16_t *iptr;
42 uint16_t __iomem *optr;
43 uint32_t cnt;
44 uint32_t mboxes;
45 unsigned long wait_time;
46 struct qla_hw_data *ha = vha->hw;
47 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
49 ql_dbg(ql_dbg_mbx, base_vha, 0x1000, "Entered %s.\n", __func__);
51 if (ha->pdev->error_state > pci_channel_io_frozen) {
52 ql_log(ql_log_warn, base_vha, 0x1001,
53 "error_state is greater than pci_channel_io_frozen, "
54 "exiting.\n");
55 return QLA_FUNCTION_TIMEOUT;
58 if (vha->device_flags & DFLG_DEV_FAILED) {
59 ql_log(ql_log_warn, base_vha, 0x1002,
60 "Device in failed state, exiting.\n");
61 return QLA_FUNCTION_TIMEOUT;
64 reg = ha->iobase;
65 io_lock_on = base_vha->flags.init_done;
67 rval = QLA_SUCCESS;
68 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
71 if (ha->flags.pci_channel_io_perm_failure) {
72 ql_log(ql_log_warn, base_vha, 0x1003,
73 "Perm failure on EEH timeout MBX, exiting.\n");
74 return QLA_FUNCTION_TIMEOUT;
77 if (ha->flags.isp82xx_fw_hung) {
78 /* Setting Link-Down error */
79 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
80 ql_log(ql_log_warn, base_vha, 0x1004,
81 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
82 return QLA_FUNCTION_TIMEOUT;
86	 * Wait for active mailbox commands to finish by waiting at most tov
87	 * seconds. This serializes the actual issuing of mailbox commands
88	 * outside of ISP abort handling.
90 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
91 /* Timeout occurred. Return error. */
92 ql_log(ql_log_warn, base_vha, 0x1005,
93 "Cmd access timeout, Exiting.\n");
94 return QLA_FUNCTION_TIMEOUT;
97 ha->flags.mbox_busy = 1;
98 /* Save mailbox command for debug */
99 ha->mcp = mcp;
101 ql_dbg(ql_dbg_mbx, base_vha, 0x1006,
102 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
104 spin_lock_irqsave(&ha->hardware_lock, flags);
106 /* Load mailbox registers. */
107 if (IS_QLA82XX(ha))
108 optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
109 else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha))
110 optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
111 else
112 optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
114 iptr = mcp->mb;
115 command = mcp->mb[0];
116 mboxes = mcp->out_mb;
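	/*
	 * out_mb is a bitmap: bit N set means mcp->mb[N] below is written to
	 * outgoing mailbox register N.  ISP2200 parts have a split register
	 * file, which is why the register pointer is re-based at register 8
	 * inside the loop.
	 */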
118 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
119 if (IS_QLA2200(ha) && cnt == 8)
120 optr =
121 (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
122 if (mboxes & BIT_0)
123 WRT_REG_WORD(optr, *iptr);
125 mboxes >>= 1;
126 optr++;
127 iptr++;
130 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1111,
131 "Loaded MBX registers (displayed in bytes) =.\n");
132 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1112,
133 (uint8_t *)mcp->mb, 16);
134 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1113,
135 ".\n");
136 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1114,
137 ((uint8_t *)mcp->mb + 0x10), 16);
138 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1115,
139 ".\n");
140 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1116,
141 ((uint8_t *)mcp->mb + 0x20), 8);
142 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1117,
143 "I/O Address = %p.\n", optr);
144 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x100e);
146 /* Issue set host interrupt command to send cmd out. */
147 ha->flags.mbox_int = 0;
148 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
150 /* Unlock mbx registers and wait for interrupt */
151 ql_dbg(ql_dbg_mbx, base_vha, 0x100f,
152 "Going to unlock irq & waiting for interrupts. "
153 "jiffies=%lx.\n", jiffies);
155 /* Wait for mbx cmd completion until timeout */
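	/*
	 * Two completion paths: when no ISP abort is active and init is done
	 * (or the HBA type never polls), set the host interrupt and sleep on
	 * the mbx_intr_comp completion; otherwise poll the response queue
	 * until flags.mbox_int is set or the command times out.
	 */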
157 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
158 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
160 if (IS_QLA82XX(ha)) {
161 if (RD_REG_DWORD(&reg->isp82.hint) &
162 HINT_MBX_INT_PENDING) {
163 spin_unlock_irqrestore(&ha->hardware_lock,
164 flags);
165 ha->flags.mbox_busy = 0;
166 ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
167 "Pending mailbox timeout, exiting.\n");
168 rval = QLA_FUNCTION_TIMEOUT;
169 goto premature_exit;
171 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
172 } else if (IS_FWI2_CAPABLE(ha))
173 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
174 else
175 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
176 spin_unlock_irqrestore(&ha->hardware_lock, flags);
178 wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
180 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
182 } else {
183 ql_dbg(ql_dbg_mbx, base_vha, 0x1011,
184 "Cmd=%x Polling Mode.\n", command);
186 if (IS_QLA82XX(ha)) {
187 if (RD_REG_DWORD(&reg->isp82.hint) &
188 HINT_MBX_INT_PENDING) {
189 spin_unlock_irqrestore(&ha->hardware_lock,
190 flags);
191 ha->flags.mbox_busy = 0;
192 ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
193 "Pending mailbox timeout, exiting.\n");
194 rval = QLA_FUNCTION_TIMEOUT;
195 goto premature_exit;
197 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
198 } else if (IS_FWI2_CAPABLE(ha))
199 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
200 else
201 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
202 spin_unlock_irqrestore(&ha->hardware_lock, flags);
204 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
205 while (!ha->flags.mbox_int) {
206 if (time_after(jiffies, wait_time))
207 break;
209 /* Check for pending interrupts. */
210 qla2x00_poll(ha->rsp_q_map[0]);
212 if (!ha->flags.mbox_int &&
213 !(IS_QLA2200(ha) &&
214 command == MBC_LOAD_RISC_RAM_EXTENDED))
215 msleep(10);
216 } /* while */
217 ql_dbg(ql_dbg_mbx, base_vha, 0x1013,
218 "Waited %d sec.\n",
219 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
222 /* Check whether we timed out */
223 if (ha->flags.mbox_int) {
224 uint16_t *iptr2;
226 ql_dbg(ql_dbg_mbx, base_vha, 0x1014,
227 "Cmd=%x completed.\n", command);
229 /* Got interrupt. Clear the flag. */
230 ha->flags.mbox_int = 0;
231 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
233 if (ha->flags.isp82xx_fw_hung) {
234 ha->flags.mbox_busy = 0;
235 /* Setting Link-Down error */
236 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
237 ha->mcp = NULL;
238 rval = QLA_FUNCTION_FAILED;
239 ql_log(ql_log_warn, base_vha, 0x1015,
240 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
241 goto premature_exit;
244 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
245 rval = QLA_FUNCTION_FAILED;
247 /* Load return mailbox registers. */
248 iptr2 = mcp->mb;
249 iptr = (uint16_t *)&ha->mailbox_out[0];
250 mboxes = mcp->in_mb;
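		/*
		 * in_mb mirrors out_mb: bit N set means incoming register N
		 * (ha->mailbox_out[N], captured at interrupt/poll time) is
		 * copied back into mcp->mb[N] for the caller.
		 */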
251 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
252 if (mboxes & BIT_0)
253 *iptr2 = *iptr;
255 mboxes >>= 1;
256 iptr2++;
257 iptr++;
259 } else {
261 uint16_t mb0;
262 uint32_t ictrl;
264 if (IS_FWI2_CAPABLE(ha)) {
265 mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
266 ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
267 } else {
268 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
269 ictrl = RD_REG_WORD(&reg->isp.ictrl);
271 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1119,
272 "MBX Command timeout for cmd %x.\n", command);
273 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111a,
274 "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
275 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111b,
276 "mb[0] = 0x%x.\n", mb0);
277 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1019);
279 rval = QLA_FUNCTION_TIMEOUT;
282 ha->flags.mbox_busy = 0;
284 /* Clean up */
285 ha->mcp = NULL;
287 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
288 ql_dbg(ql_dbg_mbx, base_vha, 0x101a,
289 "Checking for additional resp interrupt.\n");
291 /* polling mode for non isp_abort commands. */
292 qla2x00_poll(ha->rsp_q_map[0]);
295 if (rval == QLA_FUNCTION_TIMEOUT &&
296 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
297 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
298 ha->flags.eeh_busy) {
299 /* not in dpc. schedule it for dpc to take over. */
300 ql_dbg(ql_dbg_mbx, base_vha, 0x101b,
301 "Timeout, schedule isp_abort_needed.\n");
303 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
304 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
305 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
306 if (IS_QLA82XX(ha)) {
307 ql_dbg(ql_dbg_mbx, vha, 0x112a,
308 "disabling pause transmit on port "
309 "0 & 1.\n");
310 qla82xx_wr_32(ha,
311 QLA82XX_CRB_NIU + 0x98,
312 CRB_NIU_XG_PAUSE_CTL_P0|
313 CRB_NIU_XG_PAUSE_CTL_P1);
315 ql_log(ql_log_info, base_vha, 0x101c,
316 "Mailbox cmd timeout occured. "
317 "Scheduling ISP abort eeh_busy=0x%x.\n",
318 ha->flags.eeh_busy);
319 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
320 qla2xxx_wake_dpc(vha);
322 } else if (!abort_active) {
323 /* call abort directly since we are in the DPC thread */
324 ql_dbg(ql_dbg_mbx, base_vha, 0x101d,
325 "Timeout, calling abort_isp.\n");
327 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
328 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
329 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
330 if (IS_QLA82XX(ha)) {
331 ql_dbg(ql_dbg_mbx, vha, 0x112b,
332 "disabling pause transmit on port "
333 "0 & 1.\n");
334 qla82xx_wr_32(ha,
335 QLA82XX_CRB_NIU + 0x98,
336 CRB_NIU_XG_PAUSE_CTL_P0|
337 CRB_NIU_XG_PAUSE_CTL_P1);
339 ql_log(ql_log_info, base_vha, 0x101e,
340 "Mailbox cmd timeout occured. "
341 "Scheduling ISP abort.\n");
343 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
344 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
345 if (ha->isp_ops->abort_isp(vha)) {
346 /* Failed. retry later. */
347 set_bit(ISP_ABORT_NEEDED,
348 &vha->dpc_flags);
350 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
351 ql_dbg(ql_dbg_mbx, base_vha, 0x101f,
352 "Finished abort_isp.\n");
357 premature_exit:
358 /* Allow next mbx cmd to come in. */
359 complete(&ha->mbx_cmd_comp);
361 if (rval) {
362 ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
363 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, cmd=%x ****.\n",
364 mcp->mb[0], mcp->mb[1], mcp->mb[2], command);
365 } else {
366 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
369 return rval;
373 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
374 uint32_t risc_code_size)
376 int rval;
377 struct qla_hw_data *ha = vha->hw;
378 mbx_cmd_t mc;
379 mbx_cmd_t *mcp = &mc;
381 ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__);
383 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
384 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
385 mcp->mb[8] = MSW(risc_addr);
386 mcp->out_mb = MBX_8|MBX_0;
387 } else {
388 mcp->mb[0] = MBC_LOAD_RISC_RAM;
389 mcp->out_mb = MBX_0;
391 mcp->mb[1] = LSW(risc_addr);
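	/*
	 * The 64-bit DMA address is split across four 16-bit mailbox
	 * registers: mb[2]/mb[3] carry the MSW/LSW of the low 32 bits and
	 * mb[6]/mb[7] the MSW/LSW of the upper 32 bits (via MSD()).
	 */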
392 mcp->mb[2] = MSW(req_dma);
393 mcp->mb[3] = LSW(req_dma);
394 mcp->mb[6] = MSW(MSD(req_dma));
395 mcp->mb[7] = LSW(MSD(req_dma));
396 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
397 if (IS_FWI2_CAPABLE(ha)) {
398 mcp->mb[4] = MSW(risc_code_size);
399 mcp->mb[5] = LSW(risc_code_size);
400 mcp->out_mb |= MBX_5|MBX_4;
401 } else {
402 mcp->mb[4] = LSW(risc_code_size);
403 mcp->out_mb |= MBX_4;
406 mcp->in_mb = MBX_0;
407 mcp->tov = MBX_TOV_SECONDS;
408 mcp->flags = 0;
409 rval = qla2x00_mailbox_command(vha, mcp);
411 if (rval != QLA_SUCCESS) {
412 ql_dbg(ql_dbg_mbx, vha, 0x1023,
413 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
414 } else {
415 ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__);
418 return rval;
421 #define EXTENDED_BB_CREDITS BIT_0
423 * qla2x00_execute_fw
424 * Start adapter firmware.
426 * Input:
427 * ha = adapter block pointer.
428 * TARGET_QUEUE_LOCK must be released.
429 * ADAPTER_STATE_LOCK must be released.
431 * Returns:
432 * qla2x00 local function return status code.
434 * Context:
435 * Kernel context.
438 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
440 int rval;
441 struct qla_hw_data *ha = vha->hw;
442 mbx_cmd_t mc;
443 mbx_cmd_t *mcp = &mc;
445 ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__);
447 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
448 mcp->out_mb = MBX_0;
449 mcp->in_mb = MBX_0;
450 if (IS_FWI2_CAPABLE(ha)) {
451 mcp->mb[1] = MSW(risc_addr);
452 mcp->mb[2] = LSW(risc_addr);
453 mcp->mb[3] = 0;
454 if (IS_QLA81XX(ha)) {
455 struct nvram_81xx *nv = ha->nvram;
456 mcp->mb[4] = (nv->enhanced_features &
457 EXTENDED_BB_CREDITS);
458 } else
459 mcp->mb[4] = 0;
460 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
461 mcp->in_mb |= MBX_1;
462 } else {
463 mcp->mb[1] = LSW(risc_addr);
464 mcp->out_mb |= MBX_1;
465 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
466 mcp->mb[2] = 0;
467 mcp->out_mb |= MBX_2;
471 mcp->tov = MBX_TOV_SECONDS;
472 mcp->flags = 0;
473 rval = qla2x00_mailbox_command(vha, mcp);
475 if (rval != QLA_SUCCESS) {
476 ql_dbg(ql_dbg_mbx, vha, 0x1026,
477 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
478 } else {
479 if (IS_FWI2_CAPABLE(ha)) {
480 ql_dbg(ql_dbg_mbx, vha, 0x1027,
481 "Done exchanges=%x.\n", mcp->mb[1]);
482 } else {
483 ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__);
487 return rval;
491 * qla2x00_get_fw_version
492 * Get firmware version.
494 * Input:
495 * ha: adapter state pointer.
496 * major: pointer for major number.
497 * minor: pointer for minor number.
498 * subminor: pointer for subminor number.
500 * Returns:
501 * qla2x00 local function return status code.
503 * Context:
504 * Kernel context.
507 qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
508 uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
509 uint32_t *mpi_caps, uint8_t *phy)
511 int rval;
512 mbx_cmd_t mc;
513 mbx_cmd_t *mcp = &mc;
515 ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
517 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
518 mcp->out_mb = MBX_0;
519 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
520 if (IS_QLA81XX(vha->hw))
521 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
522 mcp->flags = 0;
523 mcp->tov = MBX_TOV_SECONDS;
524 rval = qla2x00_mailbox_command(vha, mcp);
525 if (rval != QLA_SUCCESS)
526 goto failed;
528 /* Return mailbox data. */
529 *major = mcp->mb[1];
530 *minor = mcp->mb[2];
531 *subminor = mcp->mb[3];
532 *attributes = mcp->mb[6];
533 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
534 *memory = 0x1FFFF; /* Defaults to 128KB. */
535 else
536 *memory = (mcp->mb[5] << 16) | mcp->mb[4];
537 if (IS_QLA81XX(vha->hw)) {
538 mpi[0] = mcp->mb[10] & 0xff;
539 mpi[1] = mcp->mb[11] >> 8;
540 mpi[2] = mcp->mb[11] & 0xff;
541 *mpi_caps = (mcp->mb[12] << 16) | mcp->mb[13];
542 phy[0] = mcp->mb[8] & 0xff;
543 phy[1] = mcp->mb[9] >> 8;
544 phy[2] = mcp->mb[9] & 0xff;
546 failed:
547 if (rval != QLA_SUCCESS) {
548 /*EMPTY*/
549 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
550 } else {
551 /*EMPTY*/
552 ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__);
554 return rval;
558 * qla2x00_get_fw_options
559 *	Get firmware options.
561 * Input:
562 * ha = adapter block pointer.
563 * fwopt = pointer for firmware options.
565 * Returns:
566 * qla2x00 local function return status code.
568 * Context:
569 * Kernel context.
572 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
574 int rval;
575 mbx_cmd_t mc;
576 mbx_cmd_t *mcp = &mc;
578 ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__);
580 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
581 mcp->out_mb = MBX_0;
582 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
583 mcp->tov = MBX_TOV_SECONDS;
584 mcp->flags = 0;
585 rval = qla2x00_mailbox_command(vha, mcp);
587 if (rval != QLA_SUCCESS) {
588 /*EMPTY*/
589 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
590 } else {
591 fwopts[0] = mcp->mb[0];
592 fwopts[1] = mcp->mb[1];
593 fwopts[2] = mcp->mb[2];
594 fwopts[3] = mcp->mb[3];
596 ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__);
599 return rval;
604 * qla2x00_set_fw_options
605 * Set firmware options.
607 * Input:
608 * ha = adapter block pointer.
609 * fwopt = pointer for firmware options.
611 * Returns:
612 * qla2x00 local function return status code.
614 * Context:
615 * Kernel context.
618 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
620 int rval;
621 mbx_cmd_t mc;
622 mbx_cmd_t *mcp = &mc;
624 ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__);
626 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
627 mcp->mb[1] = fwopts[1];
628 mcp->mb[2] = fwopts[2];
629 mcp->mb[3] = fwopts[3];
630 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
631 mcp->in_mb = MBX_0;
632 if (IS_FWI2_CAPABLE(vha->hw)) {
633 mcp->in_mb |= MBX_1;
634 } else {
635 mcp->mb[10] = fwopts[10];
636 mcp->mb[11] = fwopts[11];
637 mcp->mb[12] = 0; /* Undocumented, but used */
638 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
640 mcp->tov = MBX_TOV_SECONDS;
641 mcp->flags = 0;
642 rval = qla2x00_mailbox_command(vha, mcp);
644 fwopts[0] = mcp->mb[0];
646 if (rval != QLA_SUCCESS) {
647 /*EMPTY*/
648 ql_dbg(ql_dbg_mbx, vha, 0x1030,
649 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
650 } else {
651 /*EMPTY*/
652 ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__);
655 return rval;
659 * qla2x00_mbx_reg_test
660 * Mailbox register wrap test.
662 * Input:
663 * ha = adapter block pointer.
664 * TARGET_QUEUE_LOCK must be released.
665 * ADAPTER_STATE_LOCK must be released.
667 * Returns:
668 * qla2x00 local function return status code.
670 * Context:
671 * Kernel context.
674 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
676 int rval;
677 mbx_cmd_t mc;
678 mbx_cmd_t *mcp = &mc;
680 ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__);
682 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
683 mcp->mb[1] = 0xAAAA;
684 mcp->mb[2] = 0x5555;
685 mcp->mb[3] = 0xAA55;
686 mcp->mb[4] = 0x55AA;
687 mcp->mb[5] = 0xA5A5;
688 mcp->mb[6] = 0x5A5A;
689 mcp->mb[7] = 0x2525;
690 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
691 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
692 mcp->tov = MBX_TOV_SECONDS;
693 mcp->flags = 0;
694 rval = qla2x00_mailbox_command(vha, mcp);
696 if (rval == QLA_SUCCESS) {
697 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
698 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
699 rval = QLA_FUNCTION_FAILED;
700 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
701 mcp->mb[7] != 0x2525)
702 rval = QLA_FUNCTION_FAILED;
705 if (rval != QLA_SUCCESS) {
706 /*EMPTY*/
707 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
708 } else {
709 /*EMPTY*/
710 ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__);
713 return rval;
717 * qla2x00_verify_checksum
718 * Verify firmware checksum.
720 * Input:
721 * ha = adapter block pointer.
722 * TARGET_QUEUE_LOCK must be released.
723 * ADAPTER_STATE_LOCK must be released.
725 * Returns:
726 * qla2x00 local function return status code.
728 * Context:
729 * Kernel context.
732 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
734 int rval;
735 mbx_cmd_t mc;
736 mbx_cmd_t *mcp = &mc;
738 ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__);
740 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
741 mcp->out_mb = MBX_0;
742 mcp->in_mb = MBX_0;
743 if (IS_FWI2_CAPABLE(vha->hw)) {
744 mcp->mb[1] = MSW(risc_addr);
745 mcp->mb[2] = LSW(risc_addr);
746 mcp->out_mb |= MBX_2|MBX_1;
747 mcp->in_mb |= MBX_2|MBX_1;
748 } else {
749 mcp->mb[1] = LSW(risc_addr);
750 mcp->out_mb |= MBX_1;
751 mcp->in_mb |= MBX_1;
754 mcp->tov = MBX_TOV_SECONDS;
755 mcp->flags = 0;
756 rval = qla2x00_mailbox_command(vha, mcp);
758 if (rval != QLA_SUCCESS) {
759 ql_dbg(ql_dbg_mbx, vha, 0x1036,
760 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
761 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
762 } else {
763 ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__);
766 return rval;
770 * qla2x00_issue_iocb
771 * Issue IOCB using mailbox command
773 * Input:
774 * ha = adapter state pointer.
775 * buffer = buffer pointer.
776 * phys_addr = physical address of buffer.
777 * size = size of buffer.
778 * TARGET_QUEUE_LOCK must be released.
779 * ADAPTER_STATE_LOCK must be released.
781 * Returns:
782 * qla2x00 local function return status code.
784 * Context:
785 * Kernel context.
788 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
789 dma_addr_t phys_addr, size_t size, uint32_t tov)
791 int rval;
792 mbx_cmd_t mc;
793 mbx_cmd_t *mcp = &mc;
795 ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__);
797 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
798 mcp->mb[1] = 0;
799 mcp->mb[2] = MSW(phys_addr);
800 mcp->mb[3] = LSW(phys_addr);
801 mcp->mb[6] = MSW(MSD(phys_addr));
802 mcp->mb[7] = LSW(MSD(phys_addr));
803 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
804 mcp->in_mb = MBX_2|MBX_0;
805 mcp->tov = tov;
806 mcp->flags = 0;
807 rval = qla2x00_mailbox_command(vha, mcp);
809 if (rval != QLA_SUCCESS) {
810 /*EMPTY*/
811 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
812 } else {
813 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
815 /* Mask reserved bits. */
816 sts_entry->entry_status &=
817 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
818 ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__);
821 return rval;
825 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
826 size_t size)
828 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
829 MBX_TOV_SECONDS);
833 * qla2x00_abort_command
834 *	Abort a specified IOCB.
836 * Input:
837 * ha = adapter block pointer.
838 *	sp = SRB structure pointer.
840 * Returns:
841 * qla2x00 local function return status code.
843 * Context:
844 * Kernel context.
847 qla2x00_abort_command(srb_t *sp)
849 unsigned long flags = 0;
850 int rval;
851 uint32_t handle = 0;
852 mbx_cmd_t mc;
853 mbx_cmd_t *mcp = &mc;
854 fc_port_t *fcport = sp->fcport;
855 scsi_qla_host_t *vha = fcport->vha;
856 struct qla_hw_data *ha = vha->hw;
857 struct req_que *req = vha->req;
859 ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
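	/*
	 * Locate the firmware handle for this srb by scanning the request
	 * queue's outstanding_cmds[] table under hardware_lock; the handle,
	 * split across mb[2]/mb[3], identifies the IOCB to abort.
	 */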
861 spin_lock_irqsave(&ha->hardware_lock, flags);
862 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
863 if (req->outstanding_cmds[handle] == sp)
864 break;
866 spin_unlock_irqrestore(&ha->hardware_lock, flags);
868 if (handle == MAX_OUTSTANDING_COMMANDS) {
869 /* command not found */
870 return QLA_FUNCTION_FAILED;
873 mcp->mb[0] = MBC_ABORT_COMMAND;
874 if (HAS_EXTENDED_IDS(ha))
875 mcp->mb[1] = fcport->loop_id;
876 else
877 mcp->mb[1] = fcport->loop_id << 8;
878 mcp->mb[2] = (uint16_t)handle;
879 mcp->mb[3] = (uint16_t)(handle >> 16);
880 mcp->mb[6] = (uint16_t)sp->cmd->device->lun;
881 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
882 mcp->in_mb = MBX_0;
883 mcp->tov = MBX_TOV_SECONDS;
884 mcp->flags = 0;
885 rval = qla2x00_mailbox_command(vha, mcp);
887 if (rval != QLA_SUCCESS) {
888 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
889 } else {
890 ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__);
893 return rval;
897 qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
899 int rval, rval2;
900 mbx_cmd_t mc;
901 mbx_cmd_t *mcp = &mc;
902 scsi_qla_host_t *vha;
903 struct req_que *req;
904 struct rsp_que *rsp;
906 l = l;
907 vha = fcport->vha;
909 ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__);
911 req = vha->hw->req_q_map[0];
912 rsp = req->rsp;
913 mcp->mb[0] = MBC_ABORT_TARGET;
914 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
915 if (HAS_EXTENDED_IDS(vha->hw)) {
916 mcp->mb[1] = fcport->loop_id;
917 mcp->mb[10] = 0;
918 mcp->out_mb |= MBX_10;
919 } else {
920 mcp->mb[1] = fcport->loop_id << 8;
922 mcp->mb[2] = vha->hw->loop_reset_delay;
923 mcp->mb[9] = vha->vp_idx;
925 mcp->in_mb = MBX_0;
926 mcp->tov = MBX_TOV_SECONDS;
927 mcp->flags = 0;
928 rval = qla2x00_mailbox_command(vha, mcp);
929 if (rval != QLA_SUCCESS) {
930 ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval);
933 /* Issue marker IOCB. */
934 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
935 MK_SYNC_ID);
936 if (rval2 != QLA_SUCCESS) {
937 ql_dbg(ql_dbg_mbx, vha, 0x1040,
938 "Failed to issue marker IOCB (%x).\n", rval2);
939 } else {
940 ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__);
943 return rval;
947 qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
949 int rval, rval2;
950 mbx_cmd_t mc;
951 mbx_cmd_t *mcp = &mc;
952 scsi_qla_host_t *vha;
953 struct req_que *req;
954 struct rsp_que *rsp;
956 vha = fcport->vha;
958 ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__);
960 req = vha->hw->req_q_map[0];
961 rsp = req->rsp;
962 mcp->mb[0] = MBC_LUN_RESET;
963 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
964 if (HAS_EXTENDED_IDS(vha->hw))
965 mcp->mb[1] = fcport->loop_id;
966 else
967 mcp->mb[1] = fcport->loop_id << 8;
968 mcp->mb[2] = l;
969 mcp->mb[3] = 0;
970 mcp->mb[9] = vha->vp_idx;
972 mcp->in_mb = MBX_0;
973 mcp->tov = MBX_TOV_SECONDS;
974 mcp->flags = 0;
975 rval = qla2x00_mailbox_command(vha, mcp);
976 if (rval != QLA_SUCCESS) {
977 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
980 /* Issue marker IOCB. */
981 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
982 MK_SYNC_ID_LUN);
983 if (rval2 != QLA_SUCCESS) {
984 ql_dbg(ql_dbg_mbx, vha, 0x1044,
985 "Failed to issue marker IOCB (%x).\n", rval2);
986 } else {
987 ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__);
990 return rval;
994 * qla2x00_get_adapter_id
995 * Get adapter ID and topology.
997 * Input:
998 * ha = adapter block pointer.
999 * id = pointer for loop ID.
1000 * al_pa = pointer for AL_PA.
1001 * area = pointer for area.
1002 * domain = pointer for domain.
1003 * top = pointer for topology.
1004 * TARGET_QUEUE_LOCK must be released.
1005 * ADAPTER_STATE_LOCK must be released.
1007 * Returns:
1008 * qla2x00 local function return status code.
1010 * Context:
1011 * Kernel context.
1014 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1015 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1017 int rval;
1018 mbx_cmd_t mc;
1019 mbx_cmd_t *mcp = &mc;
1021 ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__);
1023 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1024 mcp->mb[9] = vha->vp_idx;
1025 mcp->out_mb = MBX_9|MBX_0;
1026 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1027 if (IS_QLA8XXX_TYPE(vha->hw))
1028 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1029 mcp->tov = MBX_TOV_SECONDS;
1030 mcp->flags = 0;
1031 rval = qla2x00_mailbox_command(vha, mcp);
1032 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1033 rval = QLA_COMMAND_ERROR;
1034 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1035 rval = QLA_INVALID_COMMAND;
1037 /* Return data. */
1038 *id = mcp->mb[1];
1039 *al_pa = LSB(mcp->mb[2]);
1040 *area = MSB(mcp->mb[2]);
1041 *domain = LSB(mcp->mb[3]);
1042 *top = mcp->mb[6];
1043 *sw_cap = mcp->mb[7];
1045 if (rval != QLA_SUCCESS) {
1046 /*EMPTY*/
1047 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1048 } else {
1049 ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
1051 if (IS_QLA8XXX_TYPE(vha->hw)) {
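		/*
		 * On 8xxx (FCoE) parts mb[9..13] also carry the FCoE VLAN id,
		 * the FCF index, and the VN_Port MAC address packed two bytes
		 * per register (mb[13] holds MAC bytes 0-1, mb[11] bytes 4-5).
		 */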
1052 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1053 vha->fcoe_fcf_idx = mcp->mb[10];
1054 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1055 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1056 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1057 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1058 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1059 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1063 return rval;
1067 * qla2x00_get_retry_cnt
1068 * Get current firmware login retry count and delay.
1070 * Input:
1071 * ha = adapter block pointer.
1072 * retry_cnt = pointer to login retry count.
1073 * tov = pointer to login timeout value.
1075 * Returns:
1076 * qla2x00 local function return status code.
1078 * Context:
1079 * Kernel context.
1082 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1083 uint16_t *r_a_tov)
1085 int rval;
1086 uint16_t ratov;
1087 mbx_cmd_t mc;
1088 mbx_cmd_t *mcp = &mc;
1090 ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__);
1092 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1093 mcp->out_mb = MBX_0;
1094 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1095 mcp->tov = MBX_TOV_SECONDS;
1096 mcp->flags = 0;
1097 rval = qla2x00_mailbox_command(vha, mcp);
1099 if (rval != QLA_SUCCESS) {
1100 /*EMPTY*/
1101 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1102 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1103 } else {
1104 /* Convert returned data and check our values. */
1105 *r_a_tov = mcp->mb[3] / 2;
1106 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1107 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1108 /* Update to the larger values */
1109 *retry_cnt = (uint8_t)mcp->mb[1];
1110 *tov = ratov;
1113 ql_dbg(ql_dbg_mbx, vha, 0x104b,
1114 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1117 return rval;
1121 * qla2x00_init_firmware
1122 * Initialize adapter firmware.
1124 * Input:
1125 * ha = adapter block pointer.
1126 * dptr = Initialization control block pointer.
1127 * size = size of initialization control block.
1128 * TARGET_QUEUE_LOCK must be released.
1129 * ADAPTER_STATE_LOCK must be released.
1131 * Returns:
1132 * qla2x00 local function return status code.
1134 * Context:
1135 * Kernel context.
1138 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1140 int rval;
1141 mbx_cmd_t mc;
1142 mbx_cmd_t *mcp = &mc;
1143 struct qla_hw_data *ha = vha->hw;
1145 ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__);
1147 if (IS_QLA82XX(ha) && ql2xdbwr)
1148 qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
1149 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1151 if (ha->flags.npiv_supported)
1152 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1153 else
1154 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1156 mcp->mb[1] = 0;
1157 mcp->mb[2] = MSW(ha->init_cb_dma);
1158 mcp->mb[3] = LSW(ha->init_cb_dma);
1159 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1160 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1161 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1162 if (IS_QLA81XX(ha) && ha->ex_init_cb->ex_version) {
1163 mcp->mb[1] = BIT_0;
1164 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1165 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1166 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1167 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1168 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1169 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1171 mcp->in_mb = MBX_0;
1172 mcp->buf_size = size;
1173 mcp->flags = MBX_DMA_OUT;
1174 mcp->tov = MBX_TOV_SECONDS;
1175 rval = qla2x00_mailbox_command(vha, mcp);
1177 if (rval != QLA_SUCCESS) {
1178 /*EMPTY*/
1179 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1180 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1181 } else {
1182 /*EMPTY*/
1183 ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
1186 return rval;
1190 * qla2x00_get_port_database
1191 * Issue normal/enhanced get port database mailbox command
1192 * and copy device name as necessary.
1194 * Input:
1195 * ha = adapter state pointer.
1196 * dev = structure pointer.
1197 * opt = enhanced cmd option byte.
1199 * Returns:
1200 * qla2x00 local function return status code.
1202 * Context:
1203 * Kernel context.
1206 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1208 int rval;
1209 mbx_cmd_t mc;
1210 mbx_cmd_t *mcp = &mc;
1211 port_database_t *pd;
1212 struct port_database_24xx *pd24;
1213 dma_addr_t pd_dma;
1214 struct qla_hw_data *ha = vha->hw;
1216 ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__);
1218 pd24 = NULL;
1219 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1220 if (pd == NULL) {
1221 ql_log(ql_log_warn, vha, 0x1050,
1222 "Failed to allocate port database structure.\n");
1223 return QLA_MEMORY_ALLOC_FAILED;
1225 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
1227 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1228 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1229 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1230 mcp->mb[2] = MSW(pd_dma);
1231 mcp->mb[3] = LSW(pd_dma);
1232 mcp->mb[6] = MSW(MSD(pd_dma));
1233 mcp->mb[7] = LSW(MSD(pd_dma));
1234 mcp->mb[9] = vha->vp_idx;
1235 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1236 mcp->in_mb = MBX_0;
1237 if (IS_FWI2_CAPABLE(ha)) {
1238 mcp->mb[1] = fcport->loop_id;
1239 mcp->mb[10] = opt;
1240 mcp->out_mb |= MBX_10|MBX_1;
1241 mcp->in_mb |= MBX_1;
1242 } else if (HAS_EXTENDED_IDS(ha)) {
1243 mcp->mb[1] = fcport->loop_id;
1244 mcp->mb[10] = opt;
1245 mcp->out_mb |= MBX_10|MBX_1;
1246 } else {
1247 mcp->mb[1] = fcport->loop_id << 8 | opt;
1248 mcp->out_mb |= MBX_1;
1250 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1251 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1252 mcp->flags = MBX_DMA_IN;
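	/*
	 * Note: the timeout below is 2.5x the configured login timeout
	 * rather than MBX_TOV_SECONDS; the same scaling is used by the other
	 * login-related commands in this file (e.g. qla2x00_login_fabric()).
	 */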
1253 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1254 rval = qla2x00_mailbox_command(vha, mcp);
1255 if (rval != QLA_SUCCESS)
1256 goto gpd_error_out;
1258 if (IS_FWI2_CAPABLE(ha)) {
1259 pd24 = (struct port_database_24xx *) pd;
1261 /* Check for logged in state. */
1262 if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
1263 pd24->last_login_state != PDS_PRLI_COMPLETE) {
1264 ql_dbg(ql_dbg_mbx, vha, 0x1051,
1265 "Unable to verify login-state (%x/%x) for "
1266 "loop_id %x.\n", pd24->current_login_state,
1267 pd24->last_login_state, fcport->loop_id);
1268 rval = QLA_FUNCTION_FAILED;
1269 goto gpd_error_out;
1272 /* Names are little-endian. */
1273 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1274 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1276 /* Get port_id of device. */
1277 fcport->d_id.b.domain = pd24->port_id[0];
1278 fcport->d_id.b.area = pd24->port_id[1];
1279 fcport->d_id.b.al_pa = pd24->port_id[2];
1280 fcport->d_id.b.rsvd_1 = 0;
1282 /* If not target must be initiator or unknown type. */
1283 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1284 fcport->port_type = FCT_INITIATOR;
1285 else
1286 fcport->port_type = FCT_TARGET;
1287 } else {
1288 /* Check for logged in state. */
1289 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1290 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1291 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1292 "Unable to verify login-state (%x/%x) - "
1293 "portid=%02x%02x%02x.\n", pd->master_state,
1294 pd->slave_state, fcport->d_id.b.domain,
1295 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1296 rval = QLA_FUNCTION_FAILED;
1297 goto gpd_error_out;
1300 /* Names are little-endian. */
1301 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1302 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
1304 /* Get port_id of device. */
1305 fcport->d_id.b.domain = pd->port_id[0];
1306 fcport->d_id.b.area = pd->port_id[3];
1307 fcport->d_id.b.al_pa = pd->port_id[2];
1308 fcport->d_id.b.rsvd_1 = 0;
1310 /* If not target must be initiator or unknown type. */
1311 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
1312 fcport->port_type = FCT_INITIATOR;
1313 else
1314 fcport->port_type = FCT_TARGET;
1316 /* Passback COS information. */
1317 fcport->supported_classes = (pd->options & BIT_4) ?
1318 FC_COS_CLASS2: FC_COS_CLASS3;
1321 gpd_error_out:
1322 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1324 if (rval != QLA_SUCCESS) {
1325 ql_dbg(ql_dbg_mbx, vha, 0x1052,
1326 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1327 mcp->mb[0], mcp->mb[1]);
1328 } else {
1329 ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__);
1332 return rval;
1336 * qla2x00_get_firmware_state
1337 * Get adapter firmware state.
1339 * Input:
1340 * ha = adapter block pointer.
1341 * dptr = pointer for firmware state.
1342 * TARGET_QUEUE_LOCK must be released.
1343 * ADAPTER_STATE_LOCK must be released.
1345 * Returns:
1346 * qla2x00 local function return status code.
1348 * Context:
1349 * Kernel context.
1352 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1354 int rval;
1355 mbx_cmd_t mc;
1356 mbx_cmd_t *mcp = &mc;
1358 ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__);
1360 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1361 mcp->out_mb = MBX_0;
1362 if (IS_FWI2_CAPABLE(vha->hw))
1363 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1364 else
1365 mcp->in_mb = MBX_1|MBX_0;
1366 mcp->tov = MBX_TOV_SECONDS;
1367 mcp->flags = 0;
1368 rval = qla2x00_mailbox_command(vha, mcp);
1370 /* Return firmware states. */
1371 states[0] = mcp->mb[1];
1372 if (IS_FWI2_CAPABLE(vha->hw)) {
1373 states[1] = mcp->mb[2];
1374 states[2] = mcp->mb[3];
1375 states[3] = mcp->mb[4];
1376 states[4] = mcp->mb[5];
1379 if (rval != QLA_SUCCESS) {
1380 /*EMPTY*/
1381 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
1382 } else {
1383 /*EMPTY*/
1384 ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__);
1387 return rval;
1391 * qla2x00_get_port_name
1392 * Issue get port name mailbox command.
1393 * Returned name is in big endian format.
1395 * Input:
1396 * ha = adapter block pointer.
1397 * loop_id = loop ID of device.
1398 * name = pointer for name.
1399 * TARGET_QUEUE_LOCK must be released.
1400 * ADAPTER_STATE_LOCK must be released.
1402 * Returns:
1403 * qla2x00 local function return status code.
1405 * Context:
1406 * Kernel context.
1409 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1410 uint8_t opt)
1412 int rval;
1413 mbx_cmd_t mc;
1414 mbx_cmd_t *mcp = &mc;
1416 ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__);
1418 mcp->mb[0] = MBC_GET_PORT_NAME;
1419 mcp->mb[9] = vha->vp_idx;
1420 mcp->out_mb = MBX_9|MBX_1|MBX_0;
1421 if (HAS_EXTENDED_IDS(vha->hw)) {
1422 mcp->mb[1] = loop_id;
1423 mcp->mb[10] = opt;
1424 mcp->out_mb |= MBX_10;
1425 } else {
1426 mcp->mb[1] = loop_id << 8 | opt;
1429 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1430 mcp->tov = MBX_TOV_SECONDS;
1431 mcp->flags = 0;
1432 rval = qla2x00_mailbox_command(vha, mcp);
1434 if (rval != QLA_SUCCESS) {
1435 /*EMPTY*/
1436 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
1437 } else {
1438 if (name != NULL) {
1439 /* This function returns name in big endian. */
1440 name[0] = MSB(mcp->mb[2]);
1441 name[1] = LSB(mcp->mb[2]);
1442 name[2] = MSB(mcp->mb[3]);
1443 name[3] = LSB(mcp->mb[3]);
1444 name[4] = MSB(mcp->mb[6]);
1445 name[5] = LSB(mcp->mb[6]);
1446 name[6] = MSB(mcp->mb[7]);
1447 name[7] = LSB(mcp->mb[7]);
1450 ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__);
1453 return rval;
1457 * qla2x00_lip_reset
1458 * Issue LIP reset mailbox command.
1460 * Input:
1461 * ha = adapter block pointer.
1462 * TARGET_QUEUE_LOCK must be released.
1463 * ADAPTER_STATE_LOCK must be released.
1465 * Returns:
1466 * qla2x00 local function return status code.
1468 * Context:
1469 * Kernel context.
1472 qla2x00_lip_reset(scsi_qla_host_t *vha)
1474 int rval;
1475 mbx_cmd_t mc;
1476 mbx_cmd_t *mcp = &mc;
1478 ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
1480 if (IS_QLA8XXX_TYPE(vha->hw)) {
1481 /* Logout across all FCFs. */
1482 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1483 mcp->mb[1] = BIT_1;
1484 mcp->mb[2] = 0;
1485 mcp->out_mb = MBX_2|MBX_1|MBX_0;
1486 } else if (IS_FWI2_CAPABLE(vha->hw)) {
1487 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1488 mcp->mb[1] = BIT_6;
1489 mcp->mb[2] = 0;
1490 mcp->mb[3] = vha->hw->loop_reset_delay;
1491 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1492 } else {
1493 mcp->mb[0] = MBC_LIP_RESET;
1494 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1495 if (HAS_EXTENDED_IDS(vha->hw)) {
1496 mcp->mb[1] = 0x00ff;
1497 mcp->mb[10] = 0;
1498 mcp->out_mb |= MBX_10;
1499 } else {
1500 mcp->mb[1] = 0xff00;
1502 mcp->mb[2] = vha->hw->loop_reset_delay;
1503 mcp->mb[3] = 0;
1505 mcp->in_mb = MBX_0;
1506 mcp->tov = MBX_TOV_SECONDS;
1507 mcp->flags = 0;
1508 rval = qla2x00_mailbox_command(vha, mcp);
1510 if (rval != QLA_SUCCESS) {
1511 /*EMPTY*/
1512 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
1513 } else {
1514 /*EMPTY*/
1515 ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__);
1518 return rval;
1522 * qla2x00_send_sns
1523 * Send SNS command.
1525 * Input:
1526 * ha = adapter block pointer.
1527 * sns = pointer for command.
1528 * cmd_size = command size.
1529 * buf_size = response/command size.
1530 * TARGET_QUEUE_LOCK must be released.
1531 * ADAPTER_STATE_LOCK must be released.
1533 * Returns:
1534 * qla2x00 local function return status code.
1536 * Context:
1537 * Kernel context.
1540 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1541 uint16_t cmd_size, size_t buf_size)
1543 int rval;
1544 mbx_cmd_t mc;
1545 mbx_cmd_t *mcp = &mc;
1547 ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__);
1549 ql_dbg(ql_dbg_mbx, vha, 0x105e,
1550 "Retry cnt=%d ratov=%d total tov=%d.\n",
1551 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
1553 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
1554 mcp->mb[1] = cmd_size;
1555 mcp->mb[2] = MSW(sns_phys_address);
1556 mcp->mb[3] = LSW(sns_phys_address);
1557 mcp->mb[6] = MSW(MSD(sns_phys_address));
1558 mcp->mb[7] = LSW(MSD(sns_phys_address));
1559 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1560 mcp->in_mb = MBX_0|MBX_1;
1561 mcp->buf_size = buf_size;
1562 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
1563 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
1564 rval = qla2x00_mailbox_command(vha, mcp);
1566 if (rval != QLA_SUCCESS) {
1567 /*EMPTY*/
1568 ql_dbg(ql_dbg_mbx, vha, 0x105f,
1569 "Failed=%x mb[0]=%x mb[1]=%x.\n",
1570 rval, mcp->mb[0], mcp->mb[1]);
1571 } else {
1572 /*EMPTY*/
1573 ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__);
1576 return rval;
1580 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1581 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
1583 int rval;
1585 struct logio_entry_24xx *lg;
1586 dma_addr_t lg_dma;
1587 uint32_t iop[2];
1588 struct qla_hw_data *ha = vha->hw;
1589 struct req_que *req;
1590 struct rsp_que *rsp;
1592 ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__);
1594 if (ha->flags.cpu_affinity_enabled)
1595 req = ha->req_q_map[0];
1596 else
1597 req = vha->req;
1598 rsp = req->rsp;
1600 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1601 if (lg == NULL) {
1602 ql_log(ql_log_warn, vha, 0x1062,
1603 "Failed to allocate login IOCB.\n");
1604 return QLA_MEMORY_ALLOC_FAILED;
1606 memset(lg, 0, sizeof(struct logio_entry_24xx));
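	/*
	 * On FWI2-capable (ISP24xx and later) HBAs the fabric login is
	 * carried as a LOGINOUT_PORT_IOCB submitted via qla2x00_issue_iocb()
	 * rather than as a plain mailbox command; the IOCB completion codes
	 * are mapped back to legacy MBS_* values in mb[] below.
	 */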
1608 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1609 lg->entry_count = 1;
1610 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1611 lg->nport_handle = cpu_to_le16(loop_id);
1612 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
1613 if (opt & BIT_0)
1614 lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI);
1615 if (opt & BIT_1)
1616 lg->control_flags |= __constant_cpu_to_le16(LCF_SKIP_PRLI);
1617 lg->port_id[0] = al_pa;
1618 lg->port_id[1] = area;
1619 lg->port_id[2] = domain;
1620 lg->vp_index = vha->vp_idx;
1621 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1622 if (rval != QLA_SUCCESS) {
1623 ql_dbg(ql_dbg_mbx, vha, 0x1063,
1624 "Failed to issue login IOCB (%x).\n", rval);
1625 } else if (lg->entry_status != 0) {
1626 ql_dbg(ql_dbg_mbx, vha, 0x1064,
1627 "Failed to complete IOCB -- error status (%x).\n",
1628 lg->entry_status);
1629 rval = QLA_FUNCTION_FAILED;
1630 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1631 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1632 iop[1] = le32_to_cpu(lg->io_parameter[1]);
1634 ql_dbg(ql_dbg_mbx, vha, 0x1065,
1635 "Failed to complete IOCB -- completion status (%x) "
1636 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
1637 iop[0], iop[1]);
1639 switch (iop[0]) {
1640 case LSC_SCODE_PORTID_USED:
1641 mb[0] = MBS_PORT_ID_USED;
1642 mb[1] = LSW(iop[1]);
1643 break;
1644 case LSC_SCODE_NPORT_USED:
1645 mb[0] = MBS_LOOP_ID_USED;
1646 break;
1647 case LSC_SCODE_NOLINK:
1648 case LSC_SCODE_NOIOCB:
1649 case LSC_SCODE_NOXCB:
1650 case LSC_SCODE_CMD_FAILED:
1651 case LSC_SCODE_NOFABRIC:
1652 case LSC_SCODE_FW_NOT_READY:
1653 case LSC_SCODE_NOT_LOGGED_IN:
1654 case LSC_SCODE_NOPCB:
1655 case LSC_SCODE_ELS_REJECT:
1656 case LSC_SCODE_CMD_PARAM_ERR:
1657 case LSC_SCODE_NONPORT:
1658 case LSC_SCODE_LOGGED_IN:
1659 case LSC_SCODE_NOFLOGI_ACC:
1660 default:
1661 mb[0] = MBS_COMMAND_ERROR;
1662 break;
1664 } else {
1665 ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__);
1667 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1669 mb[0] = MBS_COMMAND_COMPLETE;
1670 mb[1] = 0;
1671 if (iop[0] & BIT_4) {
1672 if (iop[0] & BIT_8)
1673 mb[1] |= BIT_1;
1674 } else
1675 mb[1] = BIT_0;
1677 /* Passback COS information. */
1678 mb[10] = 0;
1679 if (lg->io_parameter[7] || lg->io_parameter[8])
1680 mb[10] |= BIT_0; /* Class 2. */
1681 if (lg->io_parameter[9] || lg->io_parameter[10])
1682 mb[10] |= BIT_1; /* Class 3. */
1685 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
1687 return rval;
1691 * qla2x00_login_fabric
1692 * Issue login fabric port mailbox command.
1694 * Input:
1695 * ha = adapter block pointer.
1696 * loop_id = device loop ID.
1697 * domain = device domain.
1698 * area = device area.
1699 * al_pa = device AL_PA.
1700 * status = pointer for return status.
1701 * opt = command options.
1702 * TARGET_QUEUE_LOCK must be released.
1703 * ADAPTER_STATE_LOCK must be released.
1705 * Returns:
1706 * qla2x00 local function return status code.
1708 * Context:
1709 * Kernel context.
1712 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1713 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
1715 int rval;
1716 mbx_cmd_t mc;
1717 mbx_cmd_t *mcp = &mc;
1718 struct qla_hw_data *ha = vha->hw;
1720 ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__);
1722 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1723 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1724 if (HAS_EXTENDED_IDS(ha)) {
1725 mcp->mb[1] = loop_id;
1726 mcp->mb[10] = opt;
1727 mcp->out_mb |= MBX_10;
1728 } else {
1729 mcp->mb[1] = (loop_id << 8) | opt;
1731 mcp->mb[2] = domain;
1732 mcp->mb[3] = area << 8 | al_pa;
1734 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
1735 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1736 mcp->flags = 0;
1737 rval = qla2x00_mailbox_command(vha, mcp);
1739 /* Return mailbox statuses. */
1740 if (mb != NULL) {
1741 mb[0] = mcp->mb[0];
1742 mb[1] = mcp->mb[1];
1743 mb[2] = mcp->mb[2];
1744 mb[6] = mcp->mb[6];
1745 mb[7] = mcp->mb[7];
1746 /* COS retrieved from Get-Port-Database mailbox command. */
1747 mb[10] = 0;
1750 if (rval != QLA_SUCCESS) {
1751 /* RLU tmp code: need to change main mailbox_command function to
1752 * return ok even when the mailbox completion value is not
1753 	 * SUCCESS. The caller is responsible for interpreting
1754 * the return values of this mailbox command if we're not
1755 * to change too much of the existing code.
1757 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
1758 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
1759 mcp->mb[0] == 0x4006)
1760 rval = QLA_SUCCESS;
1762 /*EMPTY*/
1763 ql_dbg(ql_dbg_mbx, vha, 0x1068,
1764 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
1765 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
1766 } else {
1767 /*EMPTY*/
1768 ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__);
1771 return rval;
1775 * qla2x00_login_local_device
1776 * Issue login loop port mailbox command.
1778 * Input:
1779 * ha = adapter block pointer.
1780 * loop_id = device loop ID.
1781 * opt = command options.
1783 * Returns:
1784 * Return status code.
1786 * Context:
1787 * Kernel context.
1791 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1792 uint16_t *mb_ret, uint8_t opt)
1794 int rval;
1795 mbx_cmd_t mc;
1796 mbx_cmd_t *mcp = &mc;
1797 struct qla_hw_data *ha = vha->hw;
1799 ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__);
1801 if (IS_FWI2_CAPABLE(ha))
1802 return qla24xx_login_fabric(vha, fcport->loop_id,
1803 fcport->d_id.b.domain, fcport->d_id.b.area,
1804 fcport->d_id.b.al_pa, mb_ret, opt);
1806 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
1807 if (HAS_EXTENDED_IDS(ha))
1808 mcp->mb[1] = fcport->loop_id;
1809 else
1810 mcp->mb[1] = fcport->loop_id << 8;
1811 mcp->mb[2] = opt;
1812 mcp->out_mb = MBX_2|MBX_1|MBX_0;
1813 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
1814 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1815 mcp->flags = 0;
1816 rval = qla2x00_mailbox_command(vha, mcp);
1818 /* Return mailbox statuses. */
1819 if (mb_ret != NULL) {
1820 mb_ret[0] = mcp->mb[0];
1821 mb_ret[1] = mcp->mb[1];
1822 mb_ret[6] = mcp->mb[6];
1823 mb_ret[7] = mcp->mb[7];
1826 if (rval != QLA_SUCCESS) {
1827 /* AV tmp code: need to change main mailbox_command function to
1828 * return ok even when the mailbox completion value is not
1829 	 * SUCCESS. The caller is responsible for interpreting
1830 * the return values of this mailbox command if we're not
1831 * to change too much of the existing code.
1833 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
1834 rval = QLA_SUCCESS;
1836 ql_dbg(ql_dbg_mbx, vha, 0x106b,
1837 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
1838 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
1839 } else {
1840 /*EMPTY*/
1841 ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__);
1844 return (rval);
1848 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1849 uint8_t area, uint8_t al_pa)
1851 int rval;
1852 struct logio_entry_24xx *lg;
1853 dma_addr_t lg_dma;
1854 struct qla_hw_data *ha = vha->hw;
1855 struct req_que *req;
1856 struct rsp_que *rsp;
1858 ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__);
1860 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1861 if (lg == NULL) {
1862 ql_log(ql_log_warn, vha, 0x106e,
1863 "Failed to allocate logout IOCB.\n");
1864 return QLA_MEMORY_ALLOC_FAILED;
1866 memset(lg, 0, sizeof(struct logio_entry_24xx));
1868 if (ql2xmaxqueues > 1)
1869 req = ha->req_q_map[0];
1870 else
1871 req = vha->req;
1872 rsp = req->rsp;
1873 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1874 lg->entry_count = 1;
1875 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1876 lg->nport_handle = cpu_to_le16(loop_id);
1877 lg->control_flags =
1878 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
1879 LCF_FREE_NPORT);
1880 lg->port_id[0] = al_pa;
1881 lg->port_id[1] = area;
1882 lg->port_id[2] = domain;
1883 lg->vp_index = vha->vp_idx;
1885 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1886 if (rval != QLA_SUCCESS) {
1887 ql_dbg(ql_dbg_mbx, vha, 0x106f,
1888 "Failed to issue logout IOCB (%x).\n", rval);
1889 } else if (lg->entry_status != 0) {
1890 ql_dbg(ql_dbg_mbx, vha, 0x1070,
1891 "Failed to complete IOCB -- error status (%x).\n",
1892 lg->entry_status);
1893 rval = QLA_FUNCTION_FAILED;
1894 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1895 ql_dbg(ql_dbg_mbx, vha, 0x1071,
1896 "Failed to complete IOCB -- completion status (%x) "
1897 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
1898 le32_to_cpu(lg->io_parameter[0]),
1899 le32_to_cpu(lg->io_parameter[1]));
1900 } else {
1901 /*EMPTY*/
1902 ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__);
1905 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
1907 return rval;
1911 * qla2x00_fabric_logout
1912 * Issue logout fabric port mailbox command.
1914 * Input:
1915 * ha = adapter block pointer.
1916 * loop_id = device loop ID.
1917 * TARGET_QUEUE_LOCK must be released.
1918 * ADAPTER_STATE_LOCK must be released.
1920 * Returns:
1921 * qla2x00 local function return status code.
1923 * Context:
1924 * Kernel context.
1927 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1928 uint8_t area, uint8_t al_pa)
1930 int rval;
1931 mbx_cmd_t mc;
1932 mbx_cmd_t *mcp = &mc;
1934 ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__);
1936 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
1937 mcp->out_mb = MBX_1|MBX_0;
1938 if (HAS_EXTENDED_IDS(vha->hw)) {
1939 mcp->mb[1] = loop_id;
1940 mcp->mb[10] = 0;
1941 mcp->out_mb |= MBX_10;
1942 } else {
1943 mcp->mb[1] = loop_id << 8;
1946 mcp->in_mb = MBX_1|MBX_0;
1947 mcp->tov = MBX_TOV_SECONDS;
1948 mcp->flags = 0;
1949 rval = qla2x00_mailbox_command(vha, mcp);
1951 if (rval != QLA_SUCCESS) {
1952 /*EMPTY*/
1953 ql_dbg(ql_dbg_mbx, vha, 0x1074,
1954 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
1955 } else {
1956 /*EMPTY*/
1957 ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__);
1960 return rval;
1964 * qla2x00_full_login_lip
1965 * Issue full login LIP mailbox command.
1967 * Input:
1968 * ha = adapter block pointer.
1969 * TARGET_QUEUE_LOCK must be released.
1970 * ADAPTER_STATE_LOCK must be released.
1972 * Returns:
1973 * qla2x00 local function return status code.
1975 * Context:
1976 * Kernel context.
1979 qla2x00_full_login_lip(scsi_qla_host_t *vha)
1981 int rval;
1982 mbx_cmd_t mc;
1983 mbx_cmd_t *mcp = &mc;
1985 ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__);
1987 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1988 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
1989 mcp->mb[2] = 0;
1990 mcp->mb[3] = 0;
1991 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1992 mcp->in_mb = MBX_0;
1993 mcp->tov = MBX_TOV_SECONDS;
1994 mcp->flags = 0;
1995 rval = qla2x00_mailbox_command(vha, mcp);
1997 if (rval != QLA_SUCCESS) {
1998 /*EMPTY*/
1999 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2000 } else {
2001 /*EMPTY*/
2002 ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__);
2005 return rval;
2009 * qla2x00_get_id_list
2011 * Input:
2012 * ha = adapter block pointer.
2014 * Returns:
2015 * qla2x00 local function return status code.
2017 * Context:
2018 * Kernel context.
2021 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2022 uint16_t *entries)
2024 int rval;
2025 mbx_cmd_t mc;
2026 mbx_cmd_t *mcp = &mc;
2028 ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__);
2030 if (id_list == NULL)
2031 return QLA_FUNCTION_FAILED;
2033 mcp->mb[0] = MBC_GET_ID_LIST;
2034 mcp->out_mb = MBX_0;
2035 if (IS_FWI2_CAPABLE(vha->hw)) {
2036 mcp->mb[2] = MSW(id_list_dma);
2037 mcp->mb[3] = LSW(id_list_dma);
2038 mcp->mb[6] = MSW(MSD(id_list_dma));
2039 mcp->mb[7] = LSW(MSD(id_list_dma));
2040 mcp->mb[8] = 0;
2041 mcp->mb[9] = vha->vp_idx;
2042 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2043 } else {
2044 mcp->mb[1] = MSW(id_list_dma);
2045 mcp->mb[2] = LSW(id_list_dma);
2046 mcp->mb[3] = MSW(MSD(id_list_dma));
2047 mcp->mb[6] = LSW(MSD(id_list_dma));
2048 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2050 mcp->in_mb = MBX_1|MBX_0;
2051 mcp->tov = MBX_TOV_SECONDS;
2052 mcp->flags = 0;
2053 rval = qla2x00_mailbox_command(vha, mcp);
2055 if (rval != QLA_SUCCESS) {
2056 /*EMPTY*/
2057 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2058 } else {
2059 *entries = mcp->mb[1];
2060 ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__);
2063 return rval;
2067 * qla2x00_get_resource_cnts
2068 * Get current firmware resource counts.
2070 * Input:
2071 * ha = adapter block pointer.
2073 * Returns:
2074 * qla2x00 local function return status code.
2076 * Context:
2077 * Kernel context.
2080 qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2081 uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
2082 uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs)
2084 int rval;
2085 mbx_cmd_t mc;
2086 mbx_cmd_t *mcp = &mc;
2088 ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__);
2090 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2091 mcp->out_mb = MBX_0;
2092 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2093 if (IS_QLA81XX(vha->hw))
2094 mcp->in_mb |= MBX_12;
2095 mcp->tov = MBX_TOV_SECONDS;
2096 mcp->flags = 0;
2097 rval = qla2x00_mailbox_command(vha, mcp);
2099 if (rval != QLA_SUCCESS) {
2100 /*EMPTY*/
2101 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2102 "Failed mb[0]=%x.\n", mcp->mb[0]);
2103 } else {
2104 ql_dbg(ql_dbg_mbx, vha, 0x107e,
2105 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2106 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2107 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2108 mcp->mb[11], mcp->mb[12]);
2110 if (cur_xchg_cnt)
2111 *cur_xchg_cnt = mcp->mb[3];
2112 if (orig_xchg_cnt)
2113 *orig_xchg_cnt = mcp->mb[6];
2114 if (cur_iocb_cnt)
2115 *cur_iocb_cnt = mcp->mb[7];
2116 if (orig_iocb_cnt)
2117 *orig_iocb_cnt = mcp->mb[10];
2118 if (vha->hw->flags.npiv_supported && max_npiv_vports)
2119 *max_npiv_vports = mcp->mb[11];
2120 if (IS_QLA81XX(vha->hw) && max_fcfs)
2121 *max_fcfs = mcp->mb[12];
2124 return (rval);
2128 * qla2x00_get_fcal_position_map
2129 * Get FCAL (LILP) position map using mailbox command
2131 * Input:
2132 * ha = adapter state pointer.
2133 * pos_map = buffer pointer (can be NULL).
2135 * Returns:
2136 * qla2x00 local function return status code.
2138 * Context:
2139 * Kernel context.
2142 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2144 int rval;
2145 mbx_cmd_t mc;
2146 mbx_cmd_t *mcp = &mc;
2147 char *pmap;
2148 dma_addr_t pmap_dma;
2149 struct qla_hw_data *ha = vha->hw;
2151 ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__);
2153 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2154 if (pmap == NULL) {
2155 ql_log(ql_log_warn, vha, 0x1080,
2156 "Memory alloc failed.\n");
2157 return QLA_MEMORY_ALLOC_FAILED;
2159 memset(pmap, 0, FCAL_MAP_SIZE);
2161 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2162 mcp->mb[2] = MSW(pmap_dma);
2163 mcp->mb[3] = LSW(pmap_dma);
2164 mcp->mb[6] = MSW(MSD(pmap_dma));
2165 mcp->mb[7] = LSW(MSD(pmap_dma));
2166 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2167 mcp->in_mb = MBX_1|MBX_0;
2168 mcp->buf_size = FCAL_MAP_SIZE;
2169 mcp->flags = MBX_DMA_IN;
2170 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2171 rval = qla2x00_mailbox_command(vha, mcp);
2173 if (rval == QLA_SUCCESS) {
2174 ql_dbg(ql_dbg_mbx, vha, 0x1081,
2175 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2176 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2177 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2178 pmap, pmap[0] + 1);
2180 if (pos_map)
2181 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2183 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2185 if (rval != QLA_SUCCESS) {
2186 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2187 } else {
2188 ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__);
2191 return rval;
2195 * qla2x00_get_link_status
2197 * Input:
2198 * vha = adapter block pointer.
2199 * loop_id = device loop ID.
2200 * stats = link statistics return buffer (stats_dma = its DMA address).
2202 * Returns:
2203 * 0 = success.
2204 * BIT_0 = mem alloc error.
2205 * BIT_1 = mailbox error.
2208 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2209 struct link_statistics *stats, dma_addr_t stats_dma)
2211 int rval;
2212 mbx_cmd_t mc;
2213 mbx_cmd_t *mcp = &mc;
2214 uint32_t *siter, *diter, dwords;
2215 struct qla_hw_data *ha = vha->hw;
2217 ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__);
2219 mcp->mb[0] = MBC_GET_LINK_STATUS;
2220 mcp->mb[2] = MSW(stats_dma);
2221 mcp->mb[3] = LSW(stats_dma);
2222 mcp->mb[6] = MSW(MSD(stats_dma));
2223 mcp->mb[7] = LSW(MSD(stats_dma));
2224 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2225 mcp->in_mb = MBX_0;
2226 if (IS_FWI2_CAPABLE(ha)) {
2227 mcp->mb[1] = loop_id;
2228 mcp->mb[4] = 0;
2229 mcp->mb[10] = 0;
2230 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2231 mcp->in_mb |= MBX_1;
2232 } else if (HAS_EXTENDED_IDS(ha)) {
2233 mcp->mb[1] = loop_id;
2234 mcp->mb[10] = 0;
2235 mcp->out_mb |= MBX_10|MBX_1;
2236 } else {
2237 mcp->mb[1] = loop_id << 8;
2238 mcp->out_mb |= MBX_1;
2240 mcp->tov = MBX_TOV_SECONDS;
2241 mcp->flags = IOCTL_CMD;
2242 rval = qla2x00_mailbox_command(vha, mcp);
2244 if (rval == QLA_SUCCESS) {
2245 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2246 ql_dbg(ql_dbg_mbx, vha, 0x1085,
2247 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2248 rval = QLA_FUNCTION_FAILED;
2249 } else {
2250 /* Copy over data -- firmware data is LE. */
2251 ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__);
2252 dwords = offsetof(struct link_statistics, unused1) / 4;
2253 siter = diter = &stats->link_fail_cnt;
2254 while (dwords--)
2255 *diter++ = le32_to_cpu(*siter++);
2257 } else {
2258 /* Failed. */
2259 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
2262 return rval;
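/*
 * Note on the copy loop above (illustrative, assuming the layout of
 * struct link_statistics): the firmware fills the buffer with little-endian
 * 32-bit counters, so they are byte-swapped in place one dword at a time.
 * Only the fields before 'unused1' are valid for this mailbox command, hence
 * the offsetof() bound; qla24xx_get_isp_stats() below converts the whole
 * structure instead.  An equivalent indexed form of the same loop:
 */
#if 0	/* example only -- not compiled */
	{
		uint32_t i;
		uint32_t n = offsetof(struct link_statistics, unused1) / 4;
		uint32_t *word = &stats->link_fail_cnt;

		for (i = 0; i < n; i++)
			word[i] = le32_to_cpu(word[i]);
	}
#endif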
2266 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2267 dma_addr_t stats_dma)
2269 int rval;
2270 mbx_cmd_t mc;
2271 mbx_cmd_t *mcp = &mc;
2272 uint32_t *siter, *diter, dwords;
2274 ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__);
2276 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
2277 mcp->mb[2] = MSW(stats_dma);
2278 mcp->mb[3] = LSW(stats_dma);
2279 mcp->mb[6] = MSW(MSD(stats_dma));
2280 mcp->mb[7] = LSW(MSD(stats_dma));
2281 mcp->mb[8] = sizeof(struct link_statistics) / 4;
2282 mcp->mb[9] = vha->vp_idx;
2283 mcp->mb[10] = 0;
2284 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2285 mcp->in_mb = MBX_2|MBX_1|MBX_0;
2286 mcp->tov = MBX_TOV_SECONDS;
2287 mcp->flags = IOCTL_CMD;
2288 rval = qla2x00_mailbox_command(vha, mcp);
2290 if (rval == QLA_SUCCESS) {
2291 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2292 ql_dbg(ql_dbg_mbx, vha, 0x1089,
2293 "Failed mb[0]=%x.\n", mcp->mb[0]);
2294 rval = QLA_FUNCTION_FAILED;
2295 } else {
2296 ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__);
2297 /* Copy over data -- firmware data is LE. */
2298 dwords = sizeof(struct link_statistics) / 4;
2299 siter = diter = &stats->link_fail_cnt;
2300 while (dwords--)
2301 *diter++ = le32_to_cpu(*siter++);
2303 } else {
2304 /* Failed. */
2305 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
2308 return rval;
2312 qla24xx_abort_command(srb_t *sp)
2314 int rval;
2315 unsigned long flags = 0;
2317 struct abort_entry_24xx *abt;
2318 dma_addr_t abt_dma;
2319 uint32_t handle;
2320 fc_port_t *fcport = sp->fcport;
2321 struct scsi_qla_host *vha = fcport->vha;
2322 struct qla_hw_data *ha = vha->hw;
2323 struct req_que *req = vha->req;
2325 ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__);
2327 spin_lock_irqsave(&ha->hardware_lock, flags);
2328 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
2329 if (req->outstanding_cmds[handle] == sp)
2330 break;
2332 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2333 if (handle == MAX_OUTSTANDING_COMMANDS) {
2334 /* Command not found. */
2335 return QLA_FUNCTION_FAILED;
2338 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
2339 if (abt == NULL) {
2340 ql_log(ql_log_warn, vha, 0x108d,
2341 "Failed to allocate abort IOCB.\n");
2342 return QLA_MEMORY_ALLOC_FAILED;
2344 memset(abt, 0, sizeof(struct abort_entry_24xx));
2346 abt->entry_type = ABORT_IOCB_TYPE;
2347 abt->entry_count = 1;
2348 abt->handle = MAKE_HANDLE(req->id, abt->handle);
2349 abt->nport_handle = cpu_to_le16(fcport->loop_id);
2350 abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
2351 abt->port_id[0] = fcport->d_id.b.al_pa;
2352 abt->port_id[1] = fcport->d_id.b.area;
2353 abt->port_id[2] = fcport->d_id.b.domain;
2354 abt->vp_index = fcport->vp_idx;
2356 abt->req_que_no = cpu_to_le16(req->id);
2358 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
2359 if (rval != QLA_SUCCESS) {
2360 ql_dbg(ql_dbg_mbx, vha, 0x108e,
2361 "Failed to issue IOCB (%x).\n", rval);
2362 } else if (abt->entry_status != 0) {
2363 ql_dbg(ql_dbg_mbx, vha, 0x108f,
2364 "Failed to complete IOCB -- error status (%x).\n",
2365 abt->entry_status);
2366 rval = QLA_FUNCTION_FAILED;
2367 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
2368 ql_dbg(ql_dbg_mbx, vha, 0x1090,
2369 "Failed to complete IOCB -- completion status (%x).\n",
2370 le16_to_cpu(abt->nport_handle));
2371 rval = QLA_FUNCTION_FAILED;
2372 } else {
2373 ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__);
2376 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
2378 return rval;
2381 struct tsk_mgmt_cmd {
2382 union {
2383 struct tsk_mgmt_entry tsk;
2384 struct sts_entry_24xx sts;
2385 } p;
2388 static int
2389 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2390 unsigned int l, int tag)
2392 int rval, rval2;
2393 struct tsk_mgmt_cmd *tsk;
2394 struct sts_entry_24xx *sts;
2395 dma_addr_t tsk_dma;
2396 scsi_qla_host_t *vha;
2397 struct qla_hw_data *ha;
2398 struct req_que *req;
2399 struct rsp_que *rsp;
2401 vha = fcport->vha;
2402 ha = vha->hw;
2403 req = vha->req;
2405 ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__);
2407 if (ha->flags.cpu_affinity_enabled)
2408 rsp = ha->rsp_q_map[tag + 1];
2409 else
2410 rsp = req->rsp;
2411 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2412 if (tsk == NULL) {
2413 ql_log(ql_log_warn, vha, 0x1093,
2414 "Failed to allocate task management IOCB.\n");
2415 return QLA_MEMORY_ALLOC_FAILED;
2417 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
2419 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
2420 tsk->p.tsk.entry_count = 1;
2421 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
2422 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
2423 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2424 tsk->p.tsk.control_flags = cpu_to_le32(type);
2425 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
2426 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
2427 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
2428 tsk->p.tsk.vp_index = fcport->vp_idx;
2429 if (type == TCF_LUN_RESET) {
2430 int_to_scsilun(l, &tsk->p.tsk.lun);
2431 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
2432 sizeof(tsk->p.tsk.lun));
2435 sts = &tsk->p.sts;
2436 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
2437 if (rval != QLA_SUCCESS) {
2438 ql_dbg(ql_dbg_mbx, vha, 0x1094,
2439 "Failed to issue %s reset IOCB (%x).\n", name, rval);
2440 } else if (sts->entry_status != 0) {
2441 ql_dbg(ql_dbg_mbx, vha, 0x1095,
2442 "Failed to complete IOCB -- error status (%x).\n",
2443 sts->entry_status);
2444 rval = QLA_FUNCTION_FAILED;
2445 } else if (sts->comp_status !=
2446 __constant_cpu_to_le16(CS_COMPLETE)) {
2447 ql_dbg(ql_dbg_mbx, vha, 0x1096,
2448 "Failed to complete IOCB -- completion status (%x).\n",
2449 le16_to_cpu(sts->comp_status));
2450 rval = QLA_FUNCTION_FAILED;
2451 } else if (le16_to_cpu(sts->scsi_status) &
2452 SS_RESPONSE_INFO_LEN_VALID) {
2453 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2454 ql_dbg(ql_dbg_mbx, vha, 0x1097,
2455 "Ignoring inconsistent data length -- not enough "
2456 "response info (%d).\n",
2457 le32_to_cpu(sts->rsp_data_len));
2458 } else if (sts->data[3]) {
2459 ql_dbg(ql_dbg_mbx, vha, 0x1098,
2460 "Failed to complete IOCB -- response (%x).\n",
2461 sts->data[3]);
2462 rval = QLA_FUNCTION_FAILED;
2466 /* Issue marker IOCB. */
2467 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
2468 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
2469 if (rval2 != QLA_SUCCESS) {
2470 ql_dbg(ql_dbg_mbx, vha, 0x1099,
2471 "Failed to issue marker IOCB (%x).\n", rval2);
2472 } else {
2473 ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__);
2476 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
2478 return rval;
2482 qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
2484 struct qla_hw_data *ha = fcport->vha->hw;
2486 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
2487 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
2489 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
2493 qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
2495 struct qla_hw_data *ha = fcport->vha->hw;
2497 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
2498 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
2500 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
2504 qla2x00_system_error(scsi_qla_host_t *vha)
2506 int rval;
2507 mbx_cmd_t mc;
2508 mbx_cmd_t *mcp = &mc;
2509 struct qla_hw_data *ha = vha->hw;
2511 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
2512 return QLA_FUNCTION_FAILED;
2514 ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__);
2516 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
2517 mcp->out_mb = MBX_0;
2518 mcp->in_mb = MBX_0;
2519 mcp->tov = 5;
2520 mcp->flags = 0;
2521 rval = qla2x00_mailbox_command(vha, mcp);
2523 if (rval != QLA_SUCCESS) {
2524 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
2525 } else {
2526 ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__);
2529 return rval;
2533 * qla2x00_set_serdes_params() -
2534 * @vha: HA context
2536 * Returns
2539 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2540 uint16_t sw_em_2g, uint16_t sw_em_4g)
2542 int rval;
2543 mbx_cmd_t mc;
2544 mbx_cmd_t *mcp = &mc;
2546 ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__);
2548 mcp->mb[0] = MBC_SERDES_PARAMS;
2549 mcp->mb[1] = BIT_0;
2550 mcp->mb[2] = sw_em_1g | BIT_15;
2551 mcp->mb[3] = sw_em_2g | BIT_15;
2552 mcp->mb[4] = sw_em_4g | BIT_15;
2553 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2554 mcp->in_mb = MBX_0;
2555 mcp->tov = MBX_TOV_SECONDS;
2556 mcp->flags = 0;
2557 rval = qla2x00_mailbox_command(vha, mcp);
2559 if (rval != QLA_SUCCESS) {
2560 /*EMPTY*/
2561 ql_dbg(ql_dbg_mbx, vha, 0x109f,
2562 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2563 } else {
2564 /*EMPTY*/
2565 ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__);
2568 return rval;
2572 qla2x00_stop_firmware(scsi_qla_host_t *vha)
2574 int rval;
2575 mbx_cmd_t mc;
2576 mbx_cmd_t *mcp = &mc;
2578 if (!IS_FWI2_CAPABLE(vha->hw))
2579 return QLA_FUNCTION_FAILED;
2581 ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__);
2583 mcp->mb[0] = MBC_STOP_FIRMWARE;
2584 mcp->out_mb = MBX_0;
2585 mcp->in_mb = MBX_0;
2586 mcp->tov = 5;
2587 mcp->flags = 0;
2588 rval = qla2x00_mailbox_command(vha, mcp);
2590 if (rval != QLA_SUCCESS) {
2591 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
2592 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2593 rval = QLA_INVALID_COMMAND;
2594 } else {
2595 ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__);
2598 return rval;
2602 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2603 uint16_t buffers)
2605 int rval;
2606 mbx_cmd_t mc;
2607 mbx_cmd_t *mcp = &mc;
2609 ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__);
2611 if (!IS_FWI2_CAPABLE(vha->hw))
2612 return QLA_FUNCTION_FAILED;
2614 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2615 return QLA_FUNCTION_FAILED;
2617 mcp->mb[0] = MBC_TRACE_CONTROL;
2618 mcp->mb[1] = TC_EFT_ENABLE;
2619 mcp->mb[2] = LSW(eft_dma);
2620 mcp->mb[3] = MSW(eft_dma);
2621 mcp->mb[4] = LSW(MSD(eft_dma));
2622 mcp->mb[5] = MSW(MSD(eft_dma));
2623 mcp->mb[6] = buffers;
2624 mcp->mb[7] = TC_AEN_DISABLE;
2625 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2626 mcp->in_mb = MBX_1|MBX_0;
2627 mcp->tov = MBX_TOV_SECONDS;
2628 mcp->flags = 0;
2629 rval = qla2x00_mailbox_command(vha, mcp);
2630 if (rval != QLA_SUCCESS) {
2631 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
2632 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2633 rval, mcp->mb[0], mcp->mb[1]);
2634 } else {
2635 ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__);
2638 return rval;
2642 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2644 int rval;
2645 mbx_cmd_t mc;
2646 mbx_cmd_t *mcp = &mc;
2648 ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__);
2650 if (!IS_FWI2_CAPABLE(vha->hw))
2651 return QLA_FUNCTION_FAILED;
2653 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2654 return QLA_FUNCTION_FAILED;
2656 mcp->mb[0] = MBC_TRACE_CONTROL;
2657 mcp->mb[1] = TC_EFT_DISABLE;
2658 mcp->out_mb = MBX_1|MBX_0;
2659 mcp->in_mb = MBX_1|MBX_0;
2660 mcp->tov = MBX_TOV_SECONDS;
2661 mcp->flags = 0;
2662 rval = qla2x00_mailbox_command(vha, mcp);
2663 if (rval != QLA_SUCCESS) {
2664 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
2665 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2666 rval, mcp->mb[0], mcp->mb[1]);
2667 } else {
2668 ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__);
2671 return rval;
2675 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2676 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
2678 int rval;
2679 mbx_cmd_t mc;
2680 mbx_cmd_t *mcp = &mc;
2682 ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
2684 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
2685 return QLA_FUNCTION_FAILED;
2687 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2688 return QLA_FUNCTION_FAILED;
2690 mcp->mb[0] = MBC_TRACE_CONTROL;
2691 mcp->mb[1] = TC_FCE_ENABLE;
2692 mcp->mb[2] = LSW(fce_dma);
2693 mcp->mb[3] = MSW(fce_dma);
2694 mcp->mb[4] = LSW(MSD(fce_dma));
2695 mcp->mb[5] = MSW(MSD(fce_dma));
2696 mcp->mb[6] = buffers;
2697 mcp->mb[7] = TC_AEN_DISABLE;
2698 mcp->mb[8] = 0;
2699 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
2700 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
2701 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
2702 MBX_1|MBX_0;
2703 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2704 mcp->tov = MBX_TOV_SECONDS;
2705 mcp->flags = 0;
2706 rval = qla2x00_mailbox_command(vha, mcp);
2707 if (rval != QLA_SUCCESS) {
2708 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
2709 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2710 rval, mcp->mb[0], mcp->mb[1]);
2711 } else {
2712 ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__);
2714 if (mb)
2715 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
2716 if (dwords)
2717 *dwords = buffers;
2720 return rval;
2724 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2726 int rval;
2727 mbx_cmd_t mc;
2728 mbx_cmd_t *mcp = &mc;
2730 ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__);
2732 if (!IS_FWI2_CAPABLE(vha->hw))
2733 return QLA_FUNCTION_FAILED;
2735 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2736 return QLA_FUNCTION_FAILED;
2738 mcp->mb[0] = MBC_TRACE_CONTROL;
2739 mcp->mb[1] = TC_FCE_DISABLE;
2740 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
2741 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2742 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
2743 MBX_1|MBX_0;
2744 mcp->tov = MBX_TOV_SECONDS;
2745 mcp->flags = 0;
2746 rval = qla2x00_mailbox_command(vha, mcp);
2747 if (rval != QLA_SUCCESS) {
2748 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
2749 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2750 rval, mcp->mb[0], mcp->mb[1]);
2751 } else {
2752 ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__);
2754 if (wr)
2755 *wr = (uint64_t) mcp->mb[5] << 48 |
2756 (uint64_t) mcp->mb[4] << 32 |
2757 (uint64_t) mcp->mb[3] << 16 |
2758 (uint64_t) mcp->mb[2];
2759 if (rd)
2760 *rd = (uint64_t) mcp->mb[9] << 48 |
2761 (uint64_t) mcp->mb[8] << 32 |
2762 (uint64_t) mcp->mb[7] << 16 |
2763 (uint64_t) mcp->mb[6];
2766 return rval;
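/*
 * Illustrative note on the wr/rd assembly above (example only): the FCE
 * write and read pointers come back split across four 16-bit mailbox
 * registers that are stitched into one 64-bit value, e.g. mb[5]=0x0001,
 * mb[4]=0x2345, mb[3]=0x6789, mb[2]=0xabcd yields *wr = 0x000123456789abcd.
 * The same composition as a stand-alone helper:
 */
#if 0	/* example only -- not compiled */
static inline uint64_t example_mb_to_u64(uint16_t hi, uint16_t mhi,
    uint16_t mlo, uint16_t lo)
{
	return (uint64_t)hi << 48 | (uint64_t)mhi << 32 |
	    (uint64_t)mlo << 16 | (uint64_t)lo;
}
#endif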
2770 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2771 uint16_t *port_speed, uint16_t *mb)
2773 int rval;
2774 mbx_cmd_t mc;
2775 mbx_cmd_t *mcp = &mc;
2777 ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__);
2779 if (!IS_IIDMA_CAPABLE(vha->hw))
2780 return QLA_FUNCTION_FAILED;
2782 mcp->mb[0] = MBC_PORT_PARAMS;
2783 mcp->mb[1] = loop_id;
2784 mcp->mb[2] = mcp->mb[3] = 0;
2785 mcp->mb[9] = vha->vp_idx;
2786 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
2787 mcp->in_mb = MBX_3|MBX_1|MBX_0;
2788 mcp->tov = MBX_TOV_SECONDS;
2789 mcp->flags = 0;
2790 rval = qla2x00_mailbox_command(vha, mcp);
2792 /* Return mailbox statuses. */
2793 if (mb != NULL) {
2794 mb[0] = mcp->mb[0];
2795 mb[1] = mcp->mb[1];
2796 mb[3] = mcp->mb[3];
2799 if (rval != QLA_SUCCESS) {
2800 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
2801 } else {
2802 ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__);
2803 if (port_speed)
2804 *port_speed = mcp->mb[3];
2807 return rval;
2811 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2812 uint16_t port_speed, uint16_t *mb)
2814 int rval;
2815 mbx_cmd_t mc;
2816 mbx_cmd_t *mcp = &mc;
2818 ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__);
2820 if (!IS_IIDMA_CAPABLE(vha->hw))
2821 return QLA_FUNCTION_FAILED;
2823 mcp->mb[0] = MBC_PORT_PARAMS;
2824 mcp->mb[1] = loop_id;
2825 mcp->mb[2] = BIT_0;
2826 if (IS_QLA8XXX_TYPE(vha->hw))
2827 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
2828 else
2829 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
2830 mcp->mb[9] = vha->vp_idx;
2831 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
2832 mcp->in_mb = MBX_3|MBX_1|MBX_0;
2833 mcp->tov = MBX_TOV_SECONDS;
2834 mcp->flags = 0;
2835 rval = qla2x00_mailbox_command(vha, mcp);
2837 /* Return mailbox statuses. */
2838 if (mb != NULL) {
2839 mb[0] = mcp->mb[0];
2840 mb[1] = mcp->mb[1];
2841 mb[3] = mcp->mb[3];
2844 if (rval != QLA_SUCCESS) {
2845 ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval);
2846 } else {
2847 ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__);
2850 return rval;
2853 void
2854 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2855 struct vp_rpt_id_entry_24xx *rptid_entry)
2857 uint8_t vp_idx;
2858 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
2859 struct qla_hw_data *ha = vha->hw;
2860 scsi_qla_host_t *vp;
2861 unsigned long flags;
2863 ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__);
2865 if (rptid_entry->entry_status != 0)
2866 return;
2868 if (rptid_entry->format == 0) {
2869 ql_dbg(ql_dbg_mbx, vha, 0x10b7,
2870 "Format 0 : Number of VPs setup %d, number of "
2871 "VPs acquired %d.\n",
2872 MSB(le16_to_cpu(rptid_entry->vp_count)),
2873 LSB(le16_to_cpu(rptid_entry->vp_count)));
2874 ql_dbg(ql_dbg_mbx, vha, 0x10b8,
2875 "Primary port id %02x%02x%02x.\n",
2876 rptid_entry->port_id[2], rptid_entry->port_id[1],
2877 rptid_entry->port_id[0]);
2878 } else if (rptid_entry->format == 1) {
2879 vp_idx = LSB(stat);
2880 ql_dbg(ql_dbg_mbx, vha, 0x10b9,
2881 "Format 1: VP[%d] enabled - status %d - with "
2882 "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
2883 rptid_entry->port_id[2], rptid_entry->port_id[1],
2884 rptid_entry->port_id[0]);
2886 vp = vha;
2887 if (vp_idx == 0 && (MSB(stat) != 1))
2888 goto reg_needed;
2890 if (MSB(stat) != 0) {
2891 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
2892 "Could not acquire ID for VP[%d].\n", vp_idx);
2893 return;
2896 spin_lock_irqsave(&ha->vport_slock, flags);
2897 list_for_each_entry(vp, &ha->vp_list, list)
2898 if (vp_idx == vp->vp_idx)
2899 break;
2900 spin_unlock_irqrestore(&ha->vport_slock, flags);
2902 if (!vp)
2903 return;
2905 vp->d_id.b.domain = rptid_entry->port_id[2];
2906 vp->d_id.b.area = rptid_entry->port_id[1];
2907 vp->d_id.b.al_pa = rptid_entry->port_id[0];
2910 * Cannot configure here as we are still sitting on the
2911 * response queue. Handle it in dpc context.
2913 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
2915 reg_needed:
2916 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
2917 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
2918 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
2919 qla2xxx_wake_dpc(vha);
2924 * qla24xx_modify_vp_config
2925 * Change VP configuration for vha
2927 * Input:
2928 * vha = adapter block pointer.
2930 * Returns:
2931 * qla2xxx local function return status code.
2933 * Context:
2934 * Kernel context.
2937 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2939 int rval;
2940 struct vp_config_entry_24xx *vpmod;
2941 dma_addr_t vpmod_dma;
2942 struct qla_hw_data *ha = vha->hw;
2943 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2945 /* This can be called by the parent */
2947 ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__);
2949 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
2950 if (!vpmod) {
2951 ql_log(ql_log_warn, vha, 0x10bc,
2952 "Failed to allocate modify VP IOCB.\n");
2953 return QLA_MEMORY_ALLOC_FAILED;
2956 memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
2957 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
2958 vpmod->entry_count = 1;
2959 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
2960 vpmod->vp_count = 1;
2961 vpmod->vp_index1 = vha->vp_idx;
2962 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
2963 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
2964 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
2965 vpmod->entry_count = 1;
2967 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
2968 if (rval != QLA_SUCCESS) {
2969 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
2970 "Failed to issue VP config IOCB (%x).\n", rval);
2971 } else if (vpmod->comp_status != 0) {
2972 ql_dbg(ql_dbg_mbx, vha, 0x10be,
2973 "Failed to complete IOCB -- error status (%x).\n",
2974 vpmod->comp_status);
2975 rval = QLA_FUNCTION_FAILED;
2976 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
2977 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
2978 "Failed to complete IOCB -- completion status (%x).\n",
2979 le16_to_cpu(vpmod->comp_status));
2980 rval = QLA_FUNCTION_FAILED;
2981 } else {
2982 /* EMPTY */
2983 ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__);
2984 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
2986 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
2988 return rval;
2992 * qla24xx_control_vp
2993 * Enable a virtual port for given host
2995 * Input:
2996 * vha = virtual adapter block pointer.
2997 * cmd = VP control command (enable/disable).
3000 * Returns:
3001 * qla2xxx local function return status code.
3003 * Context:
3004 * Kernel context.
3007 qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3009 int rval;
3010 int map, pos;
3011 struct vp_ctrl_entry_24xx *vce;
3012 dma_addr_t vce_dma;
3013 struct qla_hw_data *ha = vha->hw;
3014 int vp_index = vha->vp_idx;
3015 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3017 ql_dbg(ql_dbg_mbx, vha, 0x10c1,
3018 "Entered %s enabling index %d.\n", __func__, vp_index);
3020 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
3021 return QLA_PARAMETER_ERROR;
3023 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
3024 if (!vce) {
3025 ql_log(ql_log_warn, vha, 0x10c2,
3026 "Failed to allocate VP control IOCB.\n");
3027 return QLA_MEMORY_ALLOC_FAILED;
3029 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
3031 vce->entry_type = VP_CTRL_IOCB_TYPE;
3032 vce->entry_count = 1;
3033 vce->command = cpu_to_le16(cmd);
3034 vce->vp_count = __constant_cpu_to_le16(1);
3036 /* index map in firmware starts with 1; decrement index
3037 * this is ok as we never use index 0
3039 map = (vp_index - 1) / 8;
3040 pos = (vp_index - 1) & 7;
3041 mutex_lock(&ha->vport_lock);
3042 vce->vp_idx_map[map] |= 1 << pos;
3043 mutex_unlock(&ha->vport_lock);
3045 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
3046 if (rval != QLA_SUCCESS) {
3047 ql_dbg(ql_dbg_mbx, vha, 0x10c3,
3048 "Failed to issue VP control IOCB (%x).\n", rval);
3049 } else if (vce->entry_status != 0) {
3050 ql_dbg(ql_dbg_mbx, vha, 0x10c4,
3051 "Failed to complete IOCB -- error status (%x).\n",
3052 vce->entry_status);
3053 rval = QLA_FUNCTION_FAILED;
3054 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
3055 ql_dbg(ql_dbg_mbx, vha, 0x10c5,
3056 "Failed to complet IOCB -- completion status (%x).\n",
3057 le16_to_cpu(vce->comp_status));
3058 rval = QLA_FUNCTION_FAILED;
3059 } else {
3060 ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__);
3063 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
3065 return rval;
3069 * qla2x00_send_change_request
3070 * Receive or disable RSCN request from fabric controller
3072 * Input:
3073 * vha = adapter block pointer
3074 * format = registration format:
3075 * 0 - Reserved
3076 * 1 - Fabric detected registration
3077 * 2 - N_port detected registration
3078 * 3 - Full registration
3079 * FF - clear registration
3080 * vp_idx = Virtual port index
3082 * Returns:
3083 * qla2x00 local function return status code.
3085 * Context:
3086 * Kernel Context
3090 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
3091 uint16_t vp_idx)
3093 int rval;
3094 mbx_cmd_t mc;
3095 mbx_cmd_t *mcp = &mc;
3097 ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__);
3100 * This command is implicitly executed by firmware during login for the
3101 * physical hosts
3103 if (vp_idx == 0)
3104 return QLA_FUNCTION_FAILED;
3106 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
3107 mcp->mb[1] = format;
3108 mcp->mb[9] = vp_idx;
3109 mcp->out_mb = MBX_9|MBX_1|MBX_0;
3110 mcp->in_mb = MBX_1|MBX_0;
3111 mcp->tov = MBX_TOV_SECONDS;
3112 mcp->flags = 0;
3113 rval = qla2x00_mailbox_command(vha, mcp);
3115 if (rval == QLA_SUCCESS) {
3116 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3117 rval = BIT_1;
3119 } else
3120 rval = BIT_1;
3122 return rval;
3126 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3127 uint32_t size)
3129 int rval;
3130 mbx_cmd_t mc;
3131 mbx_cmd_t *mcp = &mc;
3133 ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__);
3135 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
3136 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
3137 mcp->mb[8] = MSW(addr);
3138 mcp->out_mb = MBX_8|MBX_0;
3139 } else {
3140 mcp->mb[0] = MBC_DUMP_RISC_RAM;
3141 mcp->out_mb = MBX_0;
3143 mcp->mb[1] = LSW(addr);
3144 mcp->mb[2] = MSW(req_dma);
3145 mcp->mb[3] = LSW(req_dma);
3146 mcp->mb[6] = MSW(MSD(req_dma));
3147 mcp->mb[7] = LSW(MSD(req_dma));
3148 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
3149 if (IS_FWI2_CAPABLE(vha->hw)) {
3150 mcp->mb[4] = MSW(size);
3151 mcp->mb[5] = LSW(size);
3152 mcp->out_mb |= MBX_5|MBX_4;
3153 } else {
3154 mcp->mb[4] = LSW(size);
3155 mcp->out_mb |= MBX_4;
3158 mcp->in_mb = MBX_0;
3159 mcp->tov = MBX_TOV_SECONDS;
3160 mcp->flags = 0;
3161 rval = qla2x00_mailbox_command(vha, mcp);
3163 if (rval != QLA_SUCCESS) {
3164 ql_dbg(ql_dbg_mbx, vha, 0x1008,
3165 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3166 } else {
3167 ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__);
3170 return rval;
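/*
 * Illustrative sketch, not part of the driver: qla2x00_dump_ram() above
 * switches to the extended DUMP RISC RAM form whenever the address needs
 * more than 16 bits or the ISP is FWI-2 capable, and splits the size across
 * mb[4]/mb[5] accordingly.  A hypothetical caller dumping 0x1000 words from
 * RISC address 0x100000 into a DMA-coherent buffer (helper name made up):
 */
#if 0	/* example only -- not compiled */
static int example_dump_risc_ram(scsi_qla_host_t *vha, dma_addr_t buf_dma)
{
	/* buf_dma must map at least 0x1000 * 4 bytes of coherent memory. */
	return qla2x00_dump_ram(vha, buf_dma, 0x100000, 0x1000);
}
#endif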
3173 /* 84XX Support **************************************************************/
3175 struct cs84xx_mgmt_cmd {
3176 union {
3177 struct verify_chip_entry_84xx req;
3178 struct verify_chip_rsp_84xx rsp;
3179 } p;
3183 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3185 int rval, retry;
3186 struct cs84xx_mgmt_cmd *mn;
3187 dma_addr_t mn_dma;
3188 uint16_t options;
3189 unsigned long flags;
3190 struct qla_hw_data *ha = vha->hw;
3192 ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__);
3194 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
3195 if (mn == NULL) {
3196 return QLA_MEMORY_ALLOC_FAILED;
3199 /* Force Update? */
3200 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
3201 /* Diagnostic firmware? */
3202 /* options |= MENLO_DIAG_FW; */
3203 /* We update the firmware with only one data sequence. */
3204 options |= VCO_END_OF_DATA;
3206 do {
3207 retry = 0;
3208 memset(mn, 0, sizeof(*mn));
3209 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
3210 mn->p.req.entry_count = 1;
3211 mn->p.req.options = cpu_to_le16(options);
3213 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
3214 "Dump of Verify Request.\n");
3215 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
3216 (uint8_t *)mn, sizeof(*mn));
3218 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
3219 if (rval != QLA_SUCCESS) {
3220 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
3221 "Failed to issue verify IOCB (%x).\n", rval);
3222 goto verify_done;
3225 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
3226 "Dump of Verify Response.\n");
3227 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
3228 (uint8_t *)mn, sizeof(*mn));
3230 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
3231 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
3232 le16_to_cpu(mn->p.rsp.failure_code) : 0;
3233 ql_dbg(ql_dbg_mbx, vha, 0x10ce,
3234 "cs=%x fc=%x.\n", status[0], status[1]);
3236 if (status[0] != CS_COMPLETE) {
3237 rval = QLA_FUNCTION_FAILED;
3238 if (!(options & VCO_DONT_UPDATE_FW)) {
3239 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
3240 "Firmware update failed. Retrying "
3241 "without update firmware.\n");
3242 options |= VCO_DONT_UPDATE_FW;
3243 options &= ~VCO_FORCE_UPDATE;
3244 retry = 1;
3246 } else {
3247 ql_dbg(ql_dbg_mbx, vha, 0x10d0,
3248 "Firmware updated to %x.\n",
3249 le32_to_cpu(mn->p.rsp.fw_ver));
3251 /* NOTE: we only update OP firmware. */
3252 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
3253 ha->cs84xx->op_fw_version =
3254 le32_to_cpu(mn->p.rsp.fw_ver);
3255 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
3256 flags);
3258 } while (retry);
3260 verify_done:
3261 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
3263 if (rval != QLA_SUCCESS) {
3264 ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval);
3265 } else {
3266 ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__);
3269 return rval;
3273 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3275 int rval;
3276 unsigned long flags;
3277 mbx_cmd_t mc;
3278 mbx_cmd_t *mcp = &mc;
3279 struct device_reg_25xxmq __iomem *reg;
3280 struct qla_hw_data *ha = vha->hw;
3282 ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__);
3284 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3285 mcp->mb[1] = req->options;
3286 mcp->mb[2] = MSW(LSD(req->dma));
3287 mcp->mb[3] = LSW(LSD(req->dma));
3288 mcp->mb[6] = MSW(MSD(req->dma));
3289 mcp->mb[7] = LSW(MSD(req->dma));
3290 mcp->mb[5] = req->length;
3291 if (req->rsp)
3292 mcp->mb[10] = req->rsp->id;
3293 mcp->mb[12] = req->qos;
3294 mcp->mb[11] = req->vp_idx;
3295 mcp->mb[13] = req->rid;
3297 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
3298 QLA_QUE_PAGE * req->id);
3300 mcp->mb[4] = req->id;
3301 /* que in ptr index */
3302 mcp->mb[8] = 0;
3303 /* que out ptr index */
3304 mcp->mb[9] = 0;
3305 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
3306 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3307 mcp->in_mb = MBX_0;
3308 mcp->flags = MBX_DMA_OUT;
3309 mcp->tov = 60;
3311 spin_lock_irqsave(&ha->hardware_lock, flags);
3312 if (!(req->options & BIT_0)) {
3313 WRT_REG_DWORD(&reg->req_q_in, 0);
3314 WRT_REG_DWORD(&reg->req_q_out, 0);
3316 req->req_q_in = &reg->req_q_in;
3317 req->req_q_out = &reg->req_q_out;
3318 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3320 rval = qla2x00_mailbox_command(vha, mcp);
3321 if (rval != QLA_SUCCESS) {
3322 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
3323 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3324 } else {
3325 ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__);
3328 return rval;
3332 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3334 int rval;
3335 unsigned long flags;
3336 mbx_cmd_t mc;
3337 mbx_cmd_t *mcp = &mc;
3338 struct device_reg_25xxmq __iomem *reg;
3339 struct qla_hw_data *ha = vha->hw;
3341 ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__);
3343 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3344 mcp->mb[1] = rsp->options;
3345 mcp->mb[2] = MSW(LSD(rsp->dma));
3346 mcp->mb[3] = LSW(LSD(rsp->dma));
3347 mcp->mb[6] = MSW(MSD(rsp->dma));
3348 mcp->mb[7] = LSW(MSD(rsp->dma));
3349 mcp->mb[5] = rsp->length;
3350 mcp->mb[14] = rsp->msix->entry;
3351 mcp->mb[13] = rsp->rid;
3353 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
3354 QLA_QUE_PAGE * rsp->id);
3356 mcp->mb[4] = rsp->id;
3357 /* que in ptr index */
3358 mcp->mb[8] = 0;
3359 /* que out ptr index */
3360 mcp->mb[9] = 0;
3361 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
3362 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3363 mcp->in_mb = MBX_0;
3364 mcp->flags = MBX_DMA_OUT;
3365 mcp->tov = 60;
3367 spin_lock_irqsave(&ha->hardware_lock, flags);
3368 if (!(rsp->options & BIT_0)) {
3369 WRT_REG_DWORD(&reg->rsp_q_out, 0);
3370 WRT_REG_DWORD(&reg->rsp_q_in, 0);
3373 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3375 rval = qla2x00_mailbox_command(vha, mcp);
3376 if (rval != QLA_SUCCESS) {
3377 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
3378 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3379 } else {
3380 ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__);
3383 return rval;
3387 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3389 int rval;
3390 mbx_cmd_t mc;
3391 mbx_cmd_t *mcp = &mc;
3393 ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__);
3395 mcp->mb[0] = MBC_IDC_ACK;
3396 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
3397 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3398 mcp->in_mb = MBX_0;
3399 mcp->tov = MBX_TOV_SECONDS;
3400 mcp->flags = 0;
3401 rval = qla2x00_mailbox_command(vha, mcp);
3403 if (rval != QLA_SUCCESS) {
3404 ql_dbg(ql_dbg_mbx, vha, 0x10da,
3405 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3406 } else {
3407 ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__);
3410 return rval;
3414 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3416 int rval;
3417 mbx_cmd_t mc;
3418 mbx_cmd_t *mcp = &mc;
3420 ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
3422 if (!IS_QLA81XX(vha->hw))
3423 return QLA_FUNCTION_FAILED;
3425 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3426 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
3427 mcp->out_mb = MBX_1|MBX_0;
3428 mcp->in_mb = MBX_1|MBX_0;
3429 mcp->tov = MBX_TOV_SECONDS;
3430 mcp->flags = 0;
3431 rval = qla2x00_mailbox_command(vha, mcp);
3433 if (rval != QLA_SUCCESS) {
3434 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
3435 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3436 rval, mcp->mb[0], mcp->mb[1]);
3437 } else {
3438 ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__);
3439 *sector_size = mcp->mb[1];
3442 return rval;
3446 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3448 int rval;
3449 mbx_cmd_t mc;
3450 mbx_cmd_t *mcp = &mc;
3452 if (!IS_QLA81XX(vha->hw))
3453 return QLA_FUNCTION_FAILED;
3455 ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
3457 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3458 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
3459 FAC_OPT_CMD_WRITE_PROTECT;
3460 mcp->out_mb = MBX_1|MBX_0;
3461 mcp->in_mb = MBX_1|MBX_0;
3462 mcp->tov = MBX_TOV_SECONDS;
3463 mcp->flags = 0;
3464 rval = qla2x00_mailbox_command(vha, mcp);
3466 if (rval != QLA_SUCCESS) {
3467 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
3468 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3469 rval, mcp->mb[0], mcp->mb[1]);
3470 } else {
3471 ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__);
3474 return rval;
3478 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3480 int rval;
3481 mbx_cmd_t mc;
3482 mbx_cmd_t *mcp = &mc;
3484 if (!IS_QLA81XX(vha->hw))
3485 return QLA_FUNCTION_FAILED;
3487 ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
3489 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3490 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
3491 mcp->mb[2] = LSW(start);
3492 mcp->mb[3] = MSW(start);
3493 mcp->mb[4] = LSW(finish);
3494 mcp->mb[5] = MSW(finish);
3495 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3496 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3497 mcp->tov = MBX_TOV_SECONDS;
3498 mcp->flags = 0;
3499 rval = qla2x00_mailbox_command(vha, mcp);
3501 if (rval != QLA_SUCCESS) {
3502 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
3503 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3504 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3505 } else {
3506 ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__);
3509 return rval;
3513 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3515 int rval = 0;
3516 mbx_cmd_t mc;
3517 mbx_cmd_t *mcp = &mc;
3519 ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__);
3521 mcp->mb[0] = MBC_RESTART_MPI_FW;
3522 mcp->out_mb = MBX_0;
3523 mcp->in_mb = MBX_0|MBX_1;
3524 mcp->tov = MBX_TOV_SECONDS;
3525 mcp->flags = 0;
3526 rval = qla2x00_mailbox_command(vha, mcp);
3528 if (rval != QLA_SUCCESS) {
3529 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
3530 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3531 rval, mcp->mb[0], mcp->mb[1]);
3532 } else {
3533 ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__);
3536 return rval;
3540 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3541 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
3543 int rval;
3544 mbx_cmd_t mc;
3545 mbx_cmd_t *mcp = &mc;
3546 struct qla_hw_data *ha = vha->hw;
3548 ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__);
3550 if (!IS_FWI2_CAPABLE(ha))
3551 return QLA_FUNCTION_FAILED;
3553 if (len == 1)
3554 opt |= BIT_0;
3556 mcp->mb[0] = MBC_READ_SFP;
3557 mcp->mb[1] = dev;
3558 mcp->mb[2] = MSW(sfp_dma);
3559 mcp->mb[3] = LSW(sfp_dma);
3560 mcp->mb[6] = MSW(MSD(sfp_dma));
3561 mcp->mb[7] = LSW(MSD(sfp_dma));
3562 mcp->mb[8] = len;
3563 mcp->mb[9] = off;
3564 mcp->mb[10] = opt;
3565 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3566 mcp->in_mb = MBX_1|MBX_0;
3567 mcp->tov = MBX_TOV_SECONDS;
3568 mcp->flags = 0;
3569 rval = qla2x00_mailbox_command(vha, mcp);
3571 if (opt & BIT_0)
3572 *sfp = mcp->mb[1];
3574 if (rval != QLA_SUCCESS) {
3575 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
3576 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3577 } else {
3578 ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__);
3581 return rval;
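/*
 * Illustrative sketch, not part of the driver: when len == 1 the routine
 * above sets BIT_0 in the option word and the firmware returns the byte in
 * mb[1] instead of via DMA, which is how qla2x00_get_thermal_temp() later in
 * this file reads the thermal sensor one byte at a time.  A minimal one-byte
 * read (helper name made up for the example):
 */
#if 0	/* example only -- not compiled */
static int example_read_one_sfp_byte(scsi_qla_host_t *vha, uint16_t dev,
    uint16_t off, uint8_t *byte)
{
	/* sfp_dma is unused for single-byte reads, so 0 is passed here. */
	return qla2x00_read_sfp(vha, 0, byte, dev, off, 1, BIT_0);
}
#endif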
3585 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3586 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
3588 int rval;
3589 mbx_cmd_t mc;
3590 mbx_cmd_t *mcp = &mc;
3591 struct qla_hw_data *ha = vha->hw;
3593 ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__);
3595 if (!IS_FWI2_CAPABLE(ha))
3596 return QLA_FUNCTION_FAILED;
3598 if (len == 1)
3599 opt |= BIT_0;
3601 if (opt & BIT_0)
3602 len = *sfp;
3604 mcp->mb[0] = MBC_WRITE_SFP;
3605 mcp->mb[1] = dev;
3606 mcp->mb[2] = MSW(sfp_dma);
3607 mcp->mb[3] = LSW(sfp_dma);
3608 mcp->mb[6] = MSW(MSD(sfp_dma));
3609 mcp->mb[7] = LSW(MSD(sfp_dma));
3610 mcp->mb[8] = len;
3611 mcp->mb[9] = off;
3612 mcp->mb[10] = opt;
3613 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3614 mcp->in_mb = MBX_1|MBX_0;
3615 mcp->tov = MBX_TOV_SECONDS;
3616 mcp->flags = 0;
3617 rval = qla2x00_mailbox_command(vha, mcp);
3619 if (rval != QLA_SUCCESS) {
3620 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
3621 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3622 } else {
3623 ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__);
3626 return rval;
3630 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3631 uint16_t size_in_bytes, uint16_t *actual_size)
3633 int rval;
3634 mbx_cmd_t mc;
3635 mbx_cmd_t *mcp = &mc;
3637 ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
3639 if (!IS_QLA8XXX_TYPE(vha->hw))
3640 return QLA_FUNCTION_FAILED;
3642 mcp->mb[0] = MBC_GET_XGMAC_STATS;
3643 mcp->mb[2] = MSW(stats_dma);
3644 mcp->mb[3] = LSW(stats_dma);
3645 mcp->mb[6] = MSW(MSD(stats_dma));
3646 mcp->mb[7] = LSW(MSD(stats_dma));
3647 mcp->mb[8] = size_in_bytes >> 2;
3648 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3649 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3650 mcp->tov = MBX_TOV_SECONDS;
3651 mcp->flags = 0;
3652 rval = qla2x00_mailbox_command(vha, mcp);
3654 if (rval != QLA_SUCCESS) {
3655 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
3656 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3657 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3658 } else {
3659 ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__);
3662 *actual_size = mcp->mb[2] << 2;
3665 return rval;
3669 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3670 uint16_t size)
3672 int rval;
3673 mbx_cmd_t mc;
3674 mbx_cmd_t *mcp = &mc;
3676 ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
3678 if (!IS_QLA8XXX_TYPE(vha->hw))
3679 return QLA_FUNCTION_FAILED;
3681 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
3682 mcp->mb[1] = 0;
3683 mcp->mb[2] = MSW(tlv_dma);
3684 mcp->mb[3] = LSW(tlv_dma);
3685 mcp->mb[6] = MSW(MSD(tlv_dma));
3686 mcp->mb[7] = LSW(MSD(tlv_dma));
3687 mcp->mb[8] = size;
3688 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3689 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3690 mcp->tov = MBX_TOV_SECONDS;
3691 mcp->flags = 0;
3692 rval = qla2x00_mailbox_command(vha, mcp);
3694 if (rval != QLA_SUCCESS) {
3695 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
3696 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3697 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3698 } else {
3699 ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__);
3702 return rval;
3706 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3708 int rval;
3709 mbx_cmd_t mc;
3710 mbx_cmd_t *mcp = &mc;
3712 ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__);
3714 if (!IS_FWI2_CAPABLE(vha->hw))
3715 return QLA_FUNCTION_FAILED;
3717 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
3718 mcp->mb[1] = LSW(risc_addr);
3719 mcp->mb[8] = MSW(risc_addr);
3720 mcp->out_mb = MBX_8|MBX_1|MBX_0;
3721 mcp->in_mb = MBX_3|MBX_2|MBX_0;
3722 mcp->tov = 30;
3723 mcp->flags = 0;
3724 rval = qla2x00_mailbox_command(vha, mcp);
3725 if (rval != QLA_SUCCESS) {
3726 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
3727 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3728 } else {
3729 ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__);
3730 *data = mcp->mb[3] << 16 | mcp->mb[2];
3733 return rval;
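/*
 * Illustrative note (example only): the 32-bit word read above comes back
 * split across two mailbox registers, so mb[3]=0x1234 and mb[2]=0x5678
 * produce *data = 0x12345678 via (mcp->mb[3] << 16 | mcp->mb[2]).
 */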
3737 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3738 uint16_t *mresp)
3740 int rval;
3741 mbx_cmd_t mc;
3742 mbx_cmd_t *mcp = &mc;
3743 uint32_t iter_cnt = 0x1;
3745 ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__);
3747 memset(mcp->mb, 0, sizeof(mcp->mb));
3748 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
3749 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64-bit addressing */
3751 /* transfer count */
3752 mcp->mb[10] = LSW(mreq->transfer_size);
3753 mcp->mb[11] = MSW(mreq->transfer_size);
3755 /* send data address */
3756 mcp->mb[14] = LSW(mreq->send_dma);
3757 mcp->mb[15] = MSW(mreq->send_dma);
3758 mcp->mb[20] = LSW(MSD(mreq->send_dma));
3759 mcp->mb[21] = MSW(MSD(mreq->send_dma));
3761 /* receive data address */
3762 mcp->mb[16] = LSW(mreq->rcv_dma);
3763 mcp->mb[17] = MSW(mreq->rcv_dma);
3764 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
3765 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
3767 /* Iteration count */
3768 mcp->mb[18] = LSW(iter_cnt);
3769 mcp->mb[19] = MSW(iter_cnt);
3771 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
3772 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
3773 if (IS_QLA8XXX_TYPE(vha->hw))
3774 mcp->out_mb |= MBX_2;
3775 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
3777 mcp->buf_size = mreq->transfer_size;
3778 mcp->tov = MBX_TOV_SECONDS;
3779 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3781 rval = qla2x00_mailbox_command(vha, mcp);
3783 if (rval != QLA_SUCCESS) {
3784 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
3785 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
3786 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
3787 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
3788 } else {
3789 ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__);
3792 /* Copy mailbox information */
3793 memcpy(mresp, mcp->mb, 64);
3794 return rval;
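/*
 * Illustrative sketch, not part of the driver: a hypothetical caller setting
 * up a struct msg_echo_lb for qla2x00_loopback_test().  Only the fields the
 * routine above actually reads (options, transfer_size, send_dma, rcv_dma)
 * are filled in; allocating the send/receive buffers and choosing the option
 * bits is the caller's responsibility (the BSG loopback path is the driver's
 * real user of this routine).
 */
#if 0	/* example only -- not compiled */
static int example_run_loopback(scsi_qla_host_t *vha, dma_addr_t send_dma,
    dma_addr_t rcv_dma, uint32_t len, uint16_t options)
{
	struct msg_echo_lb elreq;
	uint16_t resp_mb[32];	/* loopback_test copies back 64 bytes of mb data */

	memset(&elreq, 0, sizeof(elreq));
	elreq.options = options;
	elreq.transfer_size = len;
	elreq.send_dma = send_dma;
	elreq.rcv_dma = rcv_dma;

	return qla2x00_loopback_test(vha, &elreq, resp_mb);
}
#endif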
3798 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3799 uint16_t *mresp)
3801 int rval;
3802 mbx_cmd_t mc;
3803 mbx_cmd_t *mcp = &mc;
3804 struct qla_hw_data *ha = vha->hw;
3806 ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__);
3808 memset(mcp->mb, 0, sizeof(mcp->mb));
3809 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
3810 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
3811 if (IS_QLA8XXX_TYPE(ha)) {
3812 mcp->mb[1] |= BIT_15;
3813 mcp->mb[2] = vha->fcoe_fcf_idx;
3815 mcp->mb[16] = LSW(mreq->rcv_dma);
3816 mcp->mb[17] = MSW(mreq->rcv_dma);
3817 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
3818 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
3820 mcp->mb[10] = LSW(mreq->transfer_size);
3822 mcp->mb[14] = LSW(mreq->send_dma);
3823 mcp->mb[15] = MSW(mreq->send_dma);
3824 mcp->mb[20] = LSW(MSD(mreq->send_dma));
3825 mcp->mb[21] = MSW(MSD(mreq->send_dma));
3827 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
3828 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
3829 if (IS_QLA8XXX_TYPE(ha))
3830 mcp->out_mb |= MBX_2;
3832 mcp->in_mb = MBX_0;
3833 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
3834 mcp->in_mb |= MBX_1;
3835 if (IS_QLA8XXX_TYPE(ha))
3836 mcp->in_mb |= MBX_3;
3838 mcp->tov = MBX_TOV_SECONDS;
3839 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3840 mcp->buf_size = mreq->transfer_size;
3842 rval = qla2x00_mailbox_command(vha, mcp);
3844 if (rval != QLA_SUCCESS) {
3845 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
3846 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3847 rval, mcp->mb[0], mcp->mb[1]);
3848 } else {
3849 ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__);
3852 /* Copy mailbox information */
3853 memcpy(mresp, mcp->mb, 64);
3854 return rval;
3858 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
3860 int rval;
3861 mbx_cmd_t mc;
3862 mbx_cmd_t *mcp = &mc;
3864 ql_dbg(ql_dbg_mbx, vha, 0x10fd,
3865 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
3867 mcp->mb[0] = MBC_ISP84XX_RESET;
3868 mcp->mb[1] = enable_diagnostic;
3869 mcp->out_mb = MBX_1|MBX_0;
3870 mcp->in_mb = MBX_1|MBX_0;
3871 mcp->tov = MBX_TOV_SECONDS;
3872 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3873 rval = qla2x00_mailbox_command(vha, mcp);
3875 if (rval != QLA_SUCCESS)
3876 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
3877 else
3878 ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__);
3880 return rval;
3884 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3886 int rval;
3887 mbx_cmd_t mc;
3888 mbx_cmd_t *mcp = &mc;
3890 ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__);
3892 if (!IS_FWI2_CAPABLE(vha->hw))
3893 return QLA_FUNCTION_FAILED;
3895 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
3896 mcp->mb[1] = LSW(risc_addr);
3897 mcp->mb[2] = LSW(data);
3898 mcp->mb[3] = MSW(data);
3899 mcp->mb[8] = MSW(risc_addr);
3900 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
3901 mcp->in_mb = MBX_0;
3902 mcp->tov = 30;
3903 mcp->flags = 0;
3904 rval = qla2x00_mailbox_command(vha, mcp);
3905 if (rval != QLA_SUCCESS) {
3906 ql_dbg(ql_dbg_mbx, vha, 0x1101,
3907 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3908 } else {
3909 ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__);
3912 return rval;
3916 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
3918 int rval;
3919 uint32_t stat, timer;
3920 uint16_t mb0 = 0;
3921 struct qla_hw_data *ha = vha->hw;
3922 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3924 rval = QLA_SUCCESS;
3926 ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__);
3928 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
3930 /* Write the MBC data to the registers */
3931 WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
3932 WRT_REG_WORD(&reg->mailbox1, mb[0]);
3933 WRT_REG_WORD(&reg->mailbox2, mb[1]);
3934 WRT_REG_WORD(&reg->mailbox3, mb[2]);
3935 WRT_REG_WORD(&reg->mailbox4, mb[3]);
3937 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
3939 /* Poll for MBC interrupt */
3940 for (timer = 6000000; timer; timer--) {
3941 /* Check for pending interrupts. */
3942 stat = RD_REG_DWORD(&reg->host_status);
3943 if (stat & HSRX_RISC_INT) {
3944 stat &= 0xff;
3946 if (stat == 0x1 || stat == 0x2 ||
3947 stat == 0x10 || stat == 0x11) {
3948 set_bit(MBX_INTERRUPT,
3949 &ha->mbx_cmd_flags);
3950 mb0 = RD_REG_WORD(&reg->mailbox0);
3951 WRT_REG_DWORD(&reg->hccr,
3952 HCCRX_CLR_RISC_INT);
3953 RD_REG_DWORD(&reg->hccr);
3954 break;
3957 udelay(5);
3960 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
3961 rval = mb0 & MBS_MASK;
3962 else
3963 rval = QLA_FUNCTION_FAILED;
3965 if (rval != QLA_SUCCESS) {
3966 ql_dbg(ql_dbg_mbx, vha, 0x1104,
3967 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
3968 } else {
3969 ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__);
3972 return rval;
3975 qla2x00_get_data_rate(scsi_qla_host_t *vha)
3977 int rval;
3978 mbx_cmd_t mc;
3979 mbx_cmd_t *mcp = &mc;
3980 struct qla_hw_data *ha = vha->hw;
3982 ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__);
3984 if (!IS_FWI2_CAPABLE(ha))
3985 return QLA_FUNCTION_FAILED;
3987 mcp->mb[0] = MBC_DATA_RATE;
3988 mcp->mb[1] = 0;
3989 mcp->out_mb = MBX_1|MBX_0;
3990 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3991 mcp->tov = MBX_TOV_SECONDS;
3992 mcp->flags = 0;
3993 rval = qla2x00_mailbox_command(vha, mcp);
3994 if (rval != QLA_SUCCESS) {
3995 ql_dbg(ql_dbg_mbx, vha, 0x1107,
3996 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3997 } else {
3998 ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__);
3999 if (mcp->mb[1] != 0x7)
4000 ha->link_data_rate = mcp->mb[1];
4003 return rval;
4007 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4009 int rval;
4010 mbx_cmd_t mc;
4011 mbx_cmd_t *mcp = &mc;
4012 struct qla_hw_data *ha = vha->hw;
4014 ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
4016 if (!IS_QLA81XX(ha))
4017 return QLA_FUNCTION_FAILED;
4018 mcp->mb[0] = MBC_GET_PORT_CONFIG;
4019 mcp->out_mb = MBX_0;
4020 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4021 mcp->tov = MBX_TOV_SECONDS;
4022 mcp->flags = 0;
4024 rval = qla2x00_mailbox_command(vha, mcp);
4026 if (rval != QLA_SUCCESS) {
4027 ql_dbg(ql_dbg_mbx, vha, 0x110a,
4028 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4029 } else {
4030 /* Copy all bits to preserve original value */
4031 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
4033 ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__);
4035 return rval;
4039 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4041 int rval;
4042 mbx_cmd_t mc;
4043 mbx_cmd_t *mcp = &mc;
4045 ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__);
4047 mcp->mb[0] = MBC_SET_PORT_CONFIG;
4048 /* Copy all bits to preserve original setting */
4049 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
4050 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4051 mcp->in_mb = MBX_0;
4052 mcp->tov = MBX_TOV_SECONDS;
4053 mcp->flags = 0;
4054 rval = qla2x00_mailbox_command(vha, mcp);
4056 if (rval != QLA_SUCCESS) {
4057 ql_dbg(ql_dbg_mbx, vha, 0x110d,
4058 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4059 } else
4060 ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__);
4062 return rval;
4067 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4068 uint16_t *mb)
4070 int rval;
4071 mbx_cmd_t mc;
4072 mbx_cmd_t *mcp = &mc;
4073 struct qla_hw_data *ha = vha->hw;
4075 ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__);
4077 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
4078 return QLA_FUNCTION_FAILED;
4080 mcp->mb[0] = MBC_PORT_PARAMS;
4081 mcp->mb[1] = loop_id;
4082 if (ha->flags.fcp_prio_enabled)
4083 mcp->mb[2] = BIT_1;
4084 else
4085 mcp->mb[2] = BIT_2;
4086 mcp->mb[4] = priority & 0xf;
4087 mcp->mb[9] = vha->vp_idx;
4088 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4089 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
4090 mcp->tov = 30;
4091 mcp->flags = 0;
4092 rval = qla2x00_mailbox_command(vha, mcp);
4093 if (mb != NULL) {
4094 mb[0] = mcp->mb[0];
4095 mb[1] = mcp->mb[1];
4096 mb[3] = mcp->mb[3];
4097 mb[4] = mcp->mb[4];
4100 if (rval != QLA_SUCCESS) {
4101 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
4102 } else {
4103 ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__);
4106 return rval;
4110 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4112 int rval;
4113 uint8_t byte;
4114 struct qla_hw_data *ha = vha->hw;
4116 ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__);
4118 /* Integer part */
4119 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
4120 if (rval != QLA_SUCCESS) {
4121 ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
4122 ha->flags.thermal_supported = 0;
4123 goto fail;
4125 *temp = byte;
4127 /* Fraction part */
4128 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0);
4129 if (rval != QLA_SUCCESS) {
4130 ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
4131 ha->flags.thermal_supported = 0;
4132 goto fail;
4134 *frac = (byte >> 6) * 25;
4136 ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__);
4137 fail:
4138 return rval;
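/*
 * Illustrative note (example only): the fraction register read above keeps
 * quarter-degrees in its two most significant bits, so (byte >> 6) * 25 maps
 * raw values 0x00/0x40/0x80/0xC0 to 0, 25, 50 and 75 hundredths of a degree.
 */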
4142 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4144 int rval;
4145 struct qla_hw_data *ha = vha->hw;
4146 mbx_cmd_t mc;
4147 mbx_cmd_t *mcp = &mc;
4149 ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__);
4151 if (!IS_FWI2_CAPABLE(ha))
4152 return QLA_FUNCTION_FAILED;
4154 memset(mcp, 0, sizeof(mbx_cmd_t));
4155 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4156 mcp->mb[1] = 1;
4158 mcp->out_mb = MBX_1|MBX_0;
4159 mcp->in_mb = MBX_0;
4160 mcp->tov = 30;
4161 mcp->flags = 0;
4163 rval = qla2x00_mailbox_command(vha, mcp);
4164 if (rval != QLA_SUCCESS) {
4165 ql_dbg(ql_dbg_mbx, vha, 0x1016,
4166 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4167 } else {
4168 ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__);
4171 return rval;
4175 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4177 int rval;
4178 struct qla_hw_data *ha = vha->hw;
4179 mbx_cmd_t mc;
4180 mbx_cmd_t *mcp = &mc;
4182 ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__);
4184 if (!IS_QLA82XX(ha))
4185 return QLA_FUNCTION_FAILED;
4187 memset(mcp, 0, sizeof(mbx_cmd_t));
4188 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4189 mcp->mb[1] = 0;
4191 mcp->out_mb = MBX_1|MBX_0;
4192 mcp->in_mb = MBX_0;
4193 mcp->tov = 30;
4194 mcp->flags = 0;
4196 rval = qla2x00_mailbox_command(vha, mcp);
4197 if (rval != QLA_SUCCESS) {
4198 ql_dbg(ql_dbg_mbx, vha, 0x100c,
4199 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4200 } else {
4201 ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__);
4204 return rval;
4208 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
4210 struct qla_hw_data *ha = vha->hw;
4211 mbx_cmd_t mc;
4212 mbx_cmd_t *mcp = &mc;
4213 int rval = QLA_FUNCTION_FAILED;
4215 ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__);
4217 memset(mcp->mb, 0, sizeof(mcp->mb));
4218 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4219 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4220 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
4221 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
4223 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
4224 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
4225 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4227 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4228 mcp->tov = MBX_TOV_SECONDS;
4229 rval = qla2x00_mailbox_command(vha, mcp);
4231 /* Always copy back return mailbox values. */
4232 if (rval != QLA_SUCCESS) {
4233 ql_dbg(ql_dbg_mbx, vha, 0x1120,
4234 "mailbox command FAILED=0x%x, subcode=%x.\n",
4235 (mcp->mb[1] << 16) | mcp->mb[0],
4236 (mcp->mb[3] << 16) | mcp->mb[2]);
4237 } else {
4238 ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__);
4239 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
4240 if (!ha->md_template_size) {
4241 ql_dbg(ql_dbg_mbx, vha, 0x1122,
4242 "Null template size obtained.\n");
4243 rval = QLA_FUNCTION_FAILED;
4246 return rval;
4250 qla82xx_md_get_template(scsi_qla_host_t *vha)
4252 struct qla_hw_data *ha = vha->hw;
4253 mbx_cmd_t mc;
4254 mbx_cmd_t *mcp = &mc;
4255 int rval = QLA_FUNCTION_FAILED;
4257 ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__);
4259 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
4260 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
4261 if (!ha->md_tmplt_hdr) {
4262 ql_log(ql_log_warn, vha, 0x1124,
4263 "Unable to allocate memory for Minidump template.\n");
4264 return rval;
4267 memset(mcp->mb, 0, sizeof(mcp->mb));
4268 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4269 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4270 mcp->mb[2] = LSW(RQST_TMPLT);
4271 mcp->mb[3] = MSW(RQST_TMPLT);
4272 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
4273 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
4274 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
4275 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
4276 mcp->mb[8] = LSW(ha->md_template_size);
4277 mcp->mb[9] = MSW(ha->md_template_size);
4279 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4280 mcp->tov = MBX_TOV_SECONDS;
4281 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
4282 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4283 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
4284 rval = qla2x00_mailbox_command(vha, mcp);
4286 if (rval != QLA_SUCCESS) {
4287 ql_dbg(ql_dbg_mbx, vha, 0x1125,
4288 "mailbox command FAILED=0x%x, subcode=%x.\n",
4289 ((mcp->mb[1] << 16) | mcp->mb[0]),
4290 ((mcp->mb[3] << 16) | mcp->mb[2]));
4291 } else
4292 ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__);
4293 return rval;
4297 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4299 int rval;
4300 struct qla_hw_data *ha = vha->hw;
4301 mbx_cmd_t mc;
4302 mbx_cmd_t *mcp = &mc;
4304 if (!IS_QLA82XX(ha))
4305 return QLA_FUNCTION_FAILED;
4307 ql_dbg(ql_dbg_mbx, vha, 0x1127,
4308 "Entered %s.\n", __func__);
4310 memset(mcp, 0, sizeof(mbx_cmd_t));
4311 mcp->mb[0] = MBC_SET_LED_CONFIG;
4312 if (enable)
4313 mcp->mb[7] = 0xE;
4314 else
4315 mcp->mb[7] = 0xD;
4317 mcp->out_mb = MBX_7|MBX_0;
4318 mcp->in_mb = MBX_0;
4319 mcp->tov = 30;
4320 mcp->flags = 0;
4322 rval = qla2x00_mailbox_command(vha, mcp);
4323 if (rval != QLA_SUCCESS) {
4324 ql_dbg(ql_dbg_mbx, vha, 0x1128,
4325 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4326 } else {
4327 ql_dbg(ql_dbg_mbx, vha, 0x1129,
4328 "Done %s.\n", __func__);
4331 return rval;