/*
 * Extraction residue from the gitweb page this file was captured from:
 *   commit title: "tulip: convert to use netdev_for_each_mc_addr" (unrelated)
 *   path: [linux-2.6/kvm.git] / drivers / scsi / qla2xxx / qla_dbg.c
 *   blob: cb2eca4c26d801b24faccd43f622647f7c655ae7
 */
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
9 #include <linux/delay.h>
11 static inline void
12 qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
14 fw_dump->fw_major_version = htonl(ha->fw_major_version);
15 fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
16 fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
17 fw_dump->fw_attributes = htonl(ha->fw_attributes);
19 fw_dump->vendor = htonl(ha->pdev->vendor);
20 fw_dump->device = htonl(ha->pdev->device);
21 fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
22 fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
25 static inline void *
26 qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
28 struct req_que *req = ha->req_q_map[0];
29 struct rsp_que *rsp = ha->rsp_q_map[0];
30 /* Request queue. */
31 memcpy(ptr, req->ring, req->length *
32 sizeof(request_t));
34 /* Response queue. */
35 ptr += req->length * sizeof(request_t);
36 memcpy(ptr, rsp->ring, rsp->length *
37 sizeof(response_t));
39 return ptr + (rsp->length * sizeof(response_t));
/*
 * qla24xx_dump_ram() - Read @ram_dwords 32-bit words of ISP24xx RISC RAM
 * starting at RISC address @addr into host buffer @ram.
 *
 * Issues MBC_DUMP_RISC_RAM_EXTENDED mailbox commands, one GID-list-sized
 * chunk at a time; the firmware DMAs each chunk into the (borrowed)
 * gid_list buffer, which is then byte-swapped (swab32) into @ram.
 * Completion is detected by polling host_status — this runs with
 * interrupts effectively unusable during a dump.
 *
 * On success *@nxt points just past the last word written; on failure
 * *@nxt is NULL.  Returns QLA_SUCCESS, a masked mailbox status, or
 * QLA_FUNCTION_FAILED on poll timeout.
 */
static int
qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
    uint32_t ram_dwords, void **nxt)
{
	int rval;
	uint32_t cnt, stat, timer, dwords, idx;
	uint16_t mb0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	/* Reuse the GID list DMA buffer as the staging area. */
	dma_addr_t dump_dma = ha->gid_list_dma;
	uint32_t *dump = (uint32_t *)ha->gid_list;

	rval = QLA_SUCCESS;
	mb0 = 0;

	WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	dwords = GID_LIST_SIZE / 4;
	for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
	    cnt += dwords, addr += dwords) {
		/* Last chunk may be shorter. */
		if (cnt + dwords > ram_dwords)
			dwords = ram_dwords - cnt;

		/* Mailbox 1/8: low/high 16 bits of the RISC address. */
		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
		WRT_REG_WORD(&reg->mailbox8, MSW(addr));

		/* Mailbox 2/3/6/7: 64-bit DMA address of the staging buffer. */
		WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));

		/* Mailbox 4/5: dword count; then raise host interrupt to start. */
		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);

		for (timer = 6000000; timer; timer--) {
			/* Check for pending interrupts. */
			stat = RD_REG_DWORD(&reg->host_status);
			if (stat & HSRX_RISC_INT) {
				stat &= 0xff;

				/* Status 0x1/0x2/0x10/0x11: mailbox complete. */
				if (stat == 0x1 || stat == 0x2 ||
				    stat == 0x10 || stat == 0x11) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_REG_WORD(&reg->mailbox0);

					WRT_REG_DWORD(&reg->hccr,
					    HCCRX_CLR_RISC_INT);
					/* Read-back flushes the posted write. */
					RD_REG_DWORD(&reg->hccr);
					break;
				}

				/* Clear this intr; it wasn't a mailbox intr */
				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
				RD_REG_DWORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			/* Byte-swap the staged chunk into the caller's buffer. */
			for (idx = 0; idx < dwords; idx++)
				ram[cnt + idx] = swab32(dump[idx]);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	*nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
	return rval;
}
116 static int
117 qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
118 uint32_t cram_size, void **nxt)
120 int rval;
122 /* Code RAM. */
123 rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
124 if (rval != QLA_SUCCESS)
125 return rval;
127 /* External Memory. */
128 return qla24xx_dump_ram(ha, 0x100000, *nxt,
129 ha->fw_memory_size - 0x100000 + 1, nxt);
132 static uint32_t *
133 qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
134 uint32_t count, uint32_t *buf)
136 uint32_t __iomem *dmp_reg;
138 WRT_REG_DWORD(&reg->iobase_addr, iobase);
139 dmp_reg = &reg->iobase_window;
140 while (count--)
141 *buf++ = htonl(RD_REG_DWORD(dmp_reg++));
143 return buf;
146 static inline int
147 qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
149 int rval = QLA_SUCCESS;
150 uint32_t cnt;
152 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
153 for (cnt = 30000;
154 ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) &&
155 rval == QLA_SUCCESS; cnt--) {
156 if (cnt)
157 udelay(100);
158 else
159 rval = QLA_FUNCTION_TIMEOUT;
162 return rval;
/*
 * qla24xx_soft_reset() - Soft-reset the ISP24xx RISC.
 *
 * Sequence: quiesce DMA, assert ISP soft reset, wait for the firmware to
 * finish any in-flight NVRAM access (mailbox0 non-zero), wait for the
 * reset bit to self-clear, release the RISC, then wait for firmware
 * ready (mailbox0 back to zero).  Statement order here mirrors the
 * hardware handshake and must not be rearranged.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_TIMEOUT.
 */
static int
qla24xx_soft_reset(struct qla_hw_data *ha)
{
	int rval = QLA_SUCCESS;
	uint32_t cnt;
	uint16_t mb0, wd;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Reset RISC. */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* Wait for DMA engines to go idle before asserting reset. */
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* Config-space read flushes the posted reset write. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);
	/* Wait for firmware to complete NVRAM accesses. */
	mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
	for (cnt = 10000 ; cnt && mb0; cnt--) {
		udelay(5);
		mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
		barrier();
	}

	/* Wait for soft-reset to complete. */
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(10);
	}
	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);             /* PCI Posting. */

	/* Firmware signals readiness by clearing mailbox0. */
	for (cnt = 30000; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	return rval;
}
/*
 * qla2xxx_dump_ram() - Read @ram_words 16-bit words of ISP2xxx RISC RAM
 * starting at RISC address @addr into host buffer @ram.
 *
 * ISP2300-family counterpart of qla24xx_dump_ram(): issues
 * MBC_DUMP_RISC_RAM_EXTENDED in GID-list-sized chunks, polls the 2300
 * host_status for mailbox completion, and byte-swaps (swab16) the staged
 * data into @ram.  On success *@nxt points just past the last word
 * written; on failure *@nxt is NULL.
 */
static int
qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
    uint32_t ram_words, void **nxt)
{
	int rval;
	uint32_t cnt, stat, timer, words, idx;
	uint16_t mb0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	/* Reuse the GID list DMA buffer as the staging area. */
	dma_addr_t dump_dma = ha->gid_list_dma;
	uint16_t *dump = (uint16_t *)ha->gid_list;

	rval = QLA_SUCCESS;
	mb0 = 0;

	WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	words = GID_LIST_SIZE / 2;
	for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
	    cnt += words, addr += words) {
		/* Last chunk may be shorter. */
		if (cnt + words > ram_words)
			words = ram_words - cnt;

		/* Mailbox 1/8: low/high 16 bits of the RISC address. */
		WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
		WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));

		/* Mailbox 2/3/6/7: 64-bit DMA address of the staging buffer. */
		WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
		WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
		WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
		WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));

		/* Word count; raise host interrupt to start the command. */
		WRT_MAILBOX_REG(ha, reg, 4, words);
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer; timer--) {
			/* Check for pending interrupts. */
			stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
			if (stat & HSR_RISC_INT) {
				stat &= 0xff;

				/* 0x1/0x2: completion with semaphore held. */
				if (stat == 0x1 || stat == 0x2) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);

					/* Release mailbox registers. */
					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				} else if (stat == 0x10 || stat == 0x11) {
					/* Completion without the semaphore. */
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);

					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				}

				/* clear this intr; it wasn't a mailbox intr */
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			/* Byte-swap the staged chunk into the caller's buffer. */
			for (idx = 0; idx < words; idx++)
				ram[cnt + idx] = swab16(dump[idx]);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	*nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
	return rval;
}
301 static inline void
302 qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
303 uint16_t *buf)
305 uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
307 while (count--)
308 *buf++ = htons(RD_REG_WORD(dmp_reg++));
311 static inline void *
312 qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
314 if (!ha->eft)
315 return ptr;
317 memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
318 return ptr + ntohl(ha->fw_dump->eft_size);
/*
 * qla25xx_copy_fce() - Append a Fibre Channel Event (FCE) chain entry.
 *
 * Writes the chain header (type, total chain size, FCE buffer size and
 * DMA address), the eight saved FCE mailbox values, then copies the FCE
 * buffer itself.  *@last_chain is pointed at this entry's type field so
 * the caller can mark the final chain entry.  Returns @ptr unchanged
 * when no FCE buffer was allocated.
 *
 * NOTE(review): the return value is iter_reg — the START of the copied
 * FCE data, not the first byte past it.  Confirm against callers before
 * using the result as an append cursor.
 */
static inline void *
qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
	uint32_t cnt;
	uint32_t *iter_reg;
	struct qla2xxx_fce_chain *fcec = ptr;

	if (!ha->fce)
		return ptr;

	*last_chain = &fcec->type;
	fcec->type = __constant_htonl(DUMP_CHAIN_FCE);
	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
	    fce_calc_size(ha->fce_bufs));
	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
	fcec->addr_l = htonl(LSD(ha->fce_dma));
	fcec->addr_h = htonl(MSD(ha->fce_dma));

	/* Eight FCE mailbox values saved when tracing was enabled. */
	iter_reg = fcec->eregs;
	for (cnt = 0; cnt < 8; cnt++)
		*iter_reg++ = htonl(ha->fce_mb[cnt]);

	memcpy(iter_reg, ha->fce, ntohl(fcec->size));

	return iter_reg;
}
348 static inline void *
349 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
351 uint32_t cnt, que_idx;
352 uint8_t que_cnt;
353 struct qla2xxx_mq_chain *mq = ptr;
354 struct device_reg_25xxmq __iomem *reg;
356 if (!ha->mqenable)
357 return ptr;
359 mq = ptr;
360 *last_chain = &mq->type;
361 mq->type = __constant_htonl(DUMP_CHAIN_MQ);
362 mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
364 que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
365 ha->max_req_queues : ha->max_rsp_queues;
366 mq->count = htonl(que_cnt);
367 for (cnt = 0; cnt < que_cnt; cnt++) {
368 reg = (struct device_reg_25xxmq *) ((void *)
369 ha->mqiobase + cnt * QLA_QUE_PAGE);
370 que_idx = cnt * 4;
371 mq->qregs[que_idx] = htonl(RD_REG_DWORD(&reg->req_q_in));
372 mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(&reg->req_q_out));
373 mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(&reg->rsp_q_in));
374 mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(&reg->rsp_q_out));
377 return ptr + sizeof(struct qla2xxx_mq_chain);
380 static void
381 qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
383 struct qla_hw_data *ha = vha->hw;
385 if (rval != QLA_SUCCESS) {
386 qla_printk(KERN_WARNING, ha,
387 "Failed to dump firmware (%x)!!!\n", rval);
388 ha->fw_dumped = 0;
389 } else {
390 qla_printk(KERN_INFO, ha,
391 "Firmware dump saved to temp buffer (%ld/%p).\n",
392 vha->host_no, ha->fw_dump);
393 ha->fw_dumped = 1;
394 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
/**
 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
 * @vha: HA context
 * @hardware_locked: non-zero when called with the hardware_lock held
 *
 * Pauses the RISC, captures all register windows into the preallocated
 * fw_dump buffer, soft-resets the ISP, then pulls RISC/stack/data SRAM
 * and the base queues.  The capture order follows the chip's dump
 * procedure and must not be rearranged.
 */
void
qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint16_t __iomem *dmp_reg;
	unsigned long flags;
	struct qla2300_fw_dump *fw;
	void *nxt;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->fw_dump) {
		qla_printk(KERN_WARNING, ha,
		    "No buffer available for dump!!!\n");
		goto qla2300_fw_dump_failed;
	}

	/* Only one dump is kept; a prior dump must be collected first. */
	if (ha->fw_dumped) {
		qla_printk(KERN_WARNING, ha,
		    "Firmware has been previously dumped (%p) -- ignoring "
		    "request...\n", ha->fw_dump);
		goto qla2300_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp23;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	rval = QLA_SUCCESS;
	fw->hccr = htons(RD_REG_WORD(&reg->hccr));

	/* Pause RISC. */
	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
	if (IS_QLA2300(ha)) {
		/* 2300 reports the pause in HCCR; poll for it. */
		for (cnt = 30000;
		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
	} else {
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
		udelay(10);
	}

	if (rval == QLA_SUCCESS) {
		/* PBIU registers. */
		dmp_reg = &reg->flash_address;
		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		/* RISC host registers. */
		dmp_reg = &reg->u.isp2300.req_q_in;
		for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++)
			fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		/* Mailbox registers. */
		dmp_reg = &reg->u.isp2300.mailbox0;
		for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		/* ctrl_status selects which register bank fb_cmd exposes. */
		WRT_REG_WORD(&reg->ctrl_status, 0x40);
		qla2xxx_read_window(reg, 32, fw->resp_dma_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x50);
		qla2xxx_read_window(reg, 48, fw->dma_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x00);
		dmp_reg = &reg->risc_hw;
		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		/* RISC GP register banks, selected via the PCR. */
		WRT_REG_WORD(&reg->pcr, 0x2000);
		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);

		WRT_REG_WORD(&reg->pcr, 0x2200);
		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);

		WRT_REG_WORD(&reg->pcr, 0x2400);
		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);

		WRT_REG_WORD(&reg->pcr, 0x2600);
		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);

		WRT_REG_WORD(&reg->pcr, 0x2800);
		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);

		WRT_REG_WORD(&reg->pcr, 0x2A00);
		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);

		WRT_REG_WORD(&reg->pcr, 0x2C00);
		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);

		WRT_REG_WORD(&reg->pcr, 0x2E00);
		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);

		/* Frame buffer and FPM banks, selected via ctrl_status. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x30);
		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);

		/* Reset RISC. */
		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;

			udelay(10);
		}
	}

	if (!IS_QLA2300(ha)) {
		/* Non-2300: wait for firmware ready (mailbox0 == 0). */
		for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
	}

	/* Get RISC SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
		    sizeof(fw->risc_ram) / 2, &nxt);

	/* Get stack SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
		    sizeof(fw->stack_ram) / 2, &nxt);

	/* Get data SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
		    ha->fw_memory_size - 0x11000 + 1, &nxt);

	if (rval == QLA_SUCCESS)
		qla2xxx_copy_queues(ha, nxt);

	qla2xxx_dump_post_process(base_vha, rval);

qla2300_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/**
 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
 * @vha: HA context
 * @hardware_locked: non-zero when called with the hardware_lock held
 *
 * Pauses the RISC, captures all register windows, resets the ISP, then
 * reads RISC SRAM one word at a time via MBC_READ_RAM_WORD (these older
 * parts have no DMA dump command) and finally appends the base queues.
 */
void
qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt, timer;
	uint16_t risc_address;
	uint16_t mb0, mb2;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint16_t __iomem *dmp_reg;
	unsigned long flags;
	struct qla2100_fw_dump *fw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	risc_address = 0;
	mb0 = mb2 = 0;
	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->fw_dump) {
		qla_printk(KERN_WARNING, ha,
		    "No buffer available for dump!!!\n");
		goto qla2100_fw_dump_failed;
	}

	/* Only one dump is kept; a prior dump must be collected first. */
	if (ha->fw_dumped) {
		qla_printk(KERN_WARNING, ha,
		    "Firmware has been previously dumped (%p) -- ignoring "
		    "request...\n", ha->fw_dump);
		goto qla2100_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp21;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	rval = QLA_SUCCESS;
	fw->hccr = htons(RD_REG_WORD(&reg->hccr));

	/* Pause RISC. */
	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
	for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS) {
		/* PBIU registers. */
		dmp_reg = &reg->flash_address;
		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		/* Mailboxes: 2200 has mailboxes 8+ at a different offset. */
		dmp_reg = &reg->u.isp2100.mailbox0;
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (cnt == 8)
				dmp_reg = &reg->u_end.isp2200.mailbox8;

			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
		}

		/* DMA registers. */
		dmp_reg = &reg->u.isp2100.unused_2[0];
		for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
			fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		WRT_REG_WORD(&reg->ctrl_status, 0x00);
		dmp_reg = &reg->risc_hw;
		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		/* RISC GP register banks, selected via the PCR. */
		WRT_REG_WORD(&reg->pcr, 0x2000);
		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);

		WRT_REG_WORD(&reg->pcr, 0x2100);
		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);

		WRT_REG_WORD(&reg->pcr, 0x2200);
		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);

		WRT_REG_WORD(&reg->pcr, 0x2300);
		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);

		WRT_REG_WORD(&reg->pcr, 0x2400);
		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);

		WRT_REG_WORD(&reg->pcr, 0x2500);
		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);

		WRT_REG_WORD(&reg->pcr, 0x2600);
		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);

		WRT_REG_WORD(&reg->pcr, 0x2700);
		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);

		/* Frame buffer and FPM banks, selected via ctrl_status. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x30);
		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);

		/* Reset the ISP. */
		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
	}

	/* Wait for firmware ready (mailbox0 cleared) after the reset. */
	for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	/* Pause RISC. */
	if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
	    (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {

		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 30000;
		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
		if (rval == QLA_SUCCESS) {
			/* Set memory configuration and timing. */
			if (IS_QLA2100(ha))
				WRT_REG_WORD(&reg->mctr, 0xf1);
			else
				WRT_REG_WORD(&reg->mctr, 0xf2);
			RD_REG_WORD(&reg->mctr);	/* PCI Posting. */

			/* Release RISC. */
			WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		}
	}

	if (rval == QLA_SUCCESS) {
		/* Get RISC SRAM. */
		risc_address = 0x1000;
		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
	}
	/* One MBC_READ_RAM_WORD round-trip per 16-bit word. */
	for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
	    cnt++, risc_address++) {
		WRT_MAILBOX_REG(ha, reg, 1, risc_address);
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer != 0; timer--) {
			/* Check for pending interrupts. */
			if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
				if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					/* mb0 = status, mb2 = the RAM word. */
					mb0 = RD_MAILBOX_REG(ha, reg, 0);
					mb2 = RD_MAILBOX_REG(ha, reg, 2);

					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				}
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			fw->risc_ram[cnt] = htons(mb2);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	if (rval == QLA_SUCCESS)
		qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);

	qla2xxx_dump_post_process(base_vha, rval);

qla2100_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/**
 * qla24xx_fw_dump() - Dumps binary data from the ISP24xx firmware.
 * @vha: HA context
 * @hardware_locked: non-zero when called with the hardware_lock held
 *
 * Pauses the RISC, walks every windowed register bank (sequencers, DMA
 * engines, RISC GP, LMC, FPM, frame buffer) into the preallocated
 * fw_dump buffer, soft-resets the chip, then dumps code RAM, external
 * memory, the base queues, and the EFT buffer.  The window addresses and
 * their order follow the chip's dump procedure and must not be changed.
 */
void
qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt;
	uint32_t risc_address;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t __iomem *dmp_reg;
	uint32_t *iter_reg;
	uint16_t __iomem *mbx_reg;
	unsigned long flags;
	struct qla24xx_fw_dump *fw;
	uint32_t ext_mem_cnt;
	void *nxt;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	risc_address = ext_mem_cnt = 0;
	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->fw_dump) {
		qla_printk(KERN_WARNING, ha,
		    "No buffer available for dump!!!\n");
		goto qla24xx_fw_dump_failed;
	}

	/* Only one dump is kept; a prior dump must be collected first. */
	if (ha->fw_dumped) {
		qla_printk(KERN_WARNING, ha,
		    "Firmware has been previously dumped (%p) -- ignoring "
		    "request...\n", ha->fw_dump);
		goto qla24xx_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp24;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));

	/* Pause RISC. */
	rval = qla24xx_pause_risc(reg);
	if (rval != QLA_SUCCESS)
		goto qla24xx_fw_dump_failed_0;

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));

	/* Disable interrupts. */
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);

	/* Shadow registers: select 0x0F70, then read via iobase_select. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);

	/* Command DMA registers. */
	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);

	/* Queues: 8 windowed dwords plus 7 dwords from iobase_q each. */
	iter_reg = fw->req0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->resp0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->req1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	qla24xx_read_window(reg, 0x3060, 16, iter_reg);

	/* Fibre Protocol Module registers. */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);

	/* Frame Buffer registers. */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	qla24xx_read_window(reg, 0x61B0, 16, iter_reg);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS)
		goto qla24xx_fw_dump_failed_0;

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla24xx_fw_dump_failed_0;

	nxt = qla2xxx_copy_queues(ha, nxt);

	qla24xx_copy_eft(ha, nxt);

qla24xx_fw_dump_failed_0:
	qla2xxx_dump_post_process(base_vha, rval);

qla24xx_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
992 void
993 qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
995 int rval;
996 uint32_t cnt;
997 uint32_t risc_address;
998 struct qla_hw_data *ha = vha->hw;
999 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1000 uint32_t __iomem *dmp_reg;
1001 uint32_t *iter_reg;
1002 uint16_t __iomem *mbx_reg;
1003 unsigned long flags;
1004 struct qla25xx_fw_dump *fw;
1005 uint32_t ext_mem_cnt;
1006 void *nxt, *nxt_chain;
1007 uint32_t *last_chain = NULL;
1008 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1010 risc_address = ext_mem_cnt = 0;
1011 flags = 0;
1013 if (!hardware_locked)
1014 spin_lock_irqsave(&ha->hardware_lock, flags);
1016 if (!ha->fw_dump) {
1017 qla_printk(KERN_WARNING, ha,
1018 "No buffer available for dump!!!\n");
1019 goto qla25xx_fw_dump_failed;
1022 if (ha->fw_dumped) {
1023 qla_printk(KERN_WARNING, ha,
1024 "Firmware has been previously dumped (%p) -- ignoring "
1025 "request...\n", ha->fw_dump);
1026 goto qla25xx_fw_dump_failed;
1028 fw = &ha->fw_dump->isp.isp25;
1029 qla2xxx_prep_dump(ha, ha->fw_dump);
1030 ha->fw_dump->version = __constant_htonl(2);
1032 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1034 /* Pause RISC. */
1035 rval = qla24xx_pause_risc(reg);
1036 if (rval != QLA_SUCCESS)
1037 goto qla25xx_fw_dump_failed_0;
1039 /* Host/Risc registers. */
1040 iter_reg = fw->host_risc_reg;
1041 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1042 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1044 /* PCIe registers. */
1045 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1046 RD_REG_DWORD(&reg->iobase_addr);
1047 WRT_REG_DWORD(&reg->iobase_window, 0x01);
1048 dmp_reg = &reg->iobase_c4;
1049 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
1050 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
1051 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1052 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1054 WRT_REG_DWORD(&reg->iobase_window, 0x00);
1055 RD_REG_DWORD(&reg->iobase_window);
1057 /* Host interface registers. */
1058 dmp_reg = &reg->flash_addr;
1059 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
1060 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1062 /* Disable interrupts. */
1063 WRT_REG_DWORD(&reg->ictrl, 0);
1064 RD_REG_DWORD(&reg->ictrl);
1066 /* Shadow registers. */
1067 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1068 RD_REG_DWORD(&reg->iobase_addr);
1069 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1070 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1072 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1073 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1075 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1076 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1078 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1079 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1081 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1082 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1084 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1085 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1087 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1088 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1090 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1091 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1093 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1094 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1096 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1097 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1099 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1100 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1102 /* RISC I/O register. */
1103 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1104 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1106 /* Mailbox registers. */
1107 mbx_reg = &reg->mailbox0;
1108 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
1109 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
1111 /* Transfer sequence registers. */
1112 iter_reg = fw->xseq_gp_reg;
1113 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1114 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1115 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1116 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1117 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1118 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1119 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1120 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1122 iter_reg = fw->xseq_0_reg;
1123 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1124 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1125 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1127 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1129 /* Receive sequence registers. */
1130 iter_reg = fw->rseq_gp_reg;
1131 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1132 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1133 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1134 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1135 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1136 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1137 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1138 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1140 iter_reg = fw->rseq_0_reg;
1141 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1142 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1144 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1145 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1147 /* Auxiliary sequence registers. */
1148 iter_reg = fw->aseq_gp_reg;
1149 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1150 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1151 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1152 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1153 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1154 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1155 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1156 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1158 iter_reg = fw->aseq_0_reg;
1159 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1160 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1162 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1163 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1165 /* Command DMA registers. */
1166 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1168 /* Queues. */
1169 iter_reg = fw->req0_dma_reg;
1170 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1171 dmp_reg = &reg->iobase_q;
1172 for (cnt = 0; cnt < 7; cnt++)
1173 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1175 iter_reg = fw->resp0_dma_reg;
1176 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1177 dmp_reg = &reg->iobase_q;
1178 for (cnt = 0; cnt < 7; cnt++)
1179 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1181 iter_reg = fw->req1_dma_reg;
1182 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1183 dmp_reg = &reg->iobase_q;
1184 for (cnt = 0; cnt < 7; cnt++)
1185 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1187 /* Transmit DMA registers. */
1188 iter_reg = fw->xmt0_dma_reg;
1189 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1190 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1192 iter_reg = fw->xmt1_dma_reg;
1193 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1194 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1196 iter_reg = fw->xmt2_dma_reg;
1197 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1198 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1200 iter_reg = fw->xmt3_dma_reg;
1201 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1202 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1204 iter_reg = fw->xmt4_dma_reg;
1205 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1206 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1208 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1210 /* Receive DMA registers. */
1211 iter_reg = fw->rcvt0_data_dma_reg;
1212 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1213 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1215 iter_reg = fw->rcvt1_data_dma_reg;
1216 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1217 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1219 /* RISC registers. */
1220 iter_reg = fw->risc_gp_reg;
1221 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1222 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1223 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1224 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1225 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1226 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1227 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1228 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1230 /* Local memory controller registers. */
1231 iter_reg = fw->lmc_reg;
1232 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1233 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1234 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1235 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1236 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1237 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1238 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1239 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1241 /* Fibre Protocol Module registers. */
1242 iter_reg = fw->fpm_hdw_reg;
1243 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1244 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1245 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1246 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1247 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1248 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1249 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1250 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1251 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1252 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1253 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1254 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1256 /* Frame Buffer registers. */
1257 iter_reg = fw->fb_hdw_reg;
1258 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1259 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1260 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1261 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1262 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1263 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1264 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1265 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1266 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1267 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1268 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1269 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1271 /* Multi queue registers */
1272 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1273 &last_chain);
1275 rval = qla24xx_soft_reset(ha);
1276 if (rval != QLA_SUCCESS)
1277 goto qla25xx_fw_dump_failed_0;
1279 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1280 &nxt);
1281 if (rval != QLA_SUCCESS)
1282 goto qla25xx_fw_dump_failed_0;
1284 nxt = qla2xxx_copy_queues(ha, nxt);
1286 nxt = qla24xx_copy_eft(ha, nxt);
1288 /* Chain entries -- started with MQ. */
1289 qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1290 if (last_chain) {
1291 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1292 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
1295 qla25xx_fw_dump_failed_0:
1296 qla2xxx_dump_post_process(base_vha, rval);
1298 qla25xx_fw_dump_failed:
1299 if (!hardware_locked)
1300 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/*
 * qla81xx_fw_dump() - Capture a firmware crash dump for ISP81xx adapters.
 * @vha: virtual host whose hardware state is to be dumped
 * @hardware_locked: non-zero when the caller already holds ha->hardware_lock
 *
 * Pauses the RISC processor and snapshots the host, sequencer, DMA, RISC,
 * memory-controller, FPM and frame-buffer register windows plus RISC RAM
 * into the pre-allocated ha->fw_dump buffer, converting everything to
 * big-endian (htonl/htons) on the way.  The register windows are read in a
 * fixed sequence while the RISC is paused; do not reorder these accesses.
 * Nothing is dumped when no buffer is available or a previous dump is
 * still held in the buffer.
 */
void
qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt;
	uint32_t risc_address;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t __iomem *dmp_reg;
	uint32_t *iter_reg;
	uint16_t __iomem *mbx_reg;
	unsigned long flags;
	struct qla81xx_fw_dump *fw;
	uint32_t ext_mem_cnt;
	void *nxt, *nxt_chain;
	uint32_t *last_chain = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	/* NOTE(review): risc_address/ext_mem_cnt are never read again in this
	 * function -- TODO confirm they are leftovers that could be removed. */
	risc_address = ext_mem_cnt = 0;
	/* flags is only passed to spin_unlock_irqrestore() when we took the
	 * lock ourselves; presumably zeroed to quiet "maybe uninitialized"
	 * warnings -- confirm. */
	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->fw_dump) {
		qla_printk(KERN_WARNING, ha,
		    "No buffer available for dump!!!\n");
		goto qla81xx_fw_dump_failed;
	}

	/* Keep the first dump taken -- it describes the original fault. */
	if (ha->fw_dumped) {
		qla_printk(KERN_WARNING, ha,
		    "Firmware has been previously dumped (%p) -- ignoring "
		    "request...\n", ha->fw_dump);
		goto qla81xx_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp81;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));

	/* Pause RISC. */
	rval = qla24xx_pause_risc(reg);
	if (rval != QLA_SUCCESS)
		goto qla81xx_fw_dump_failed_0;

	/*
	 * Host/Risc registers.
	 *
	 * NOTE(review): the chained assignments below imply that
	 * qla24xx_read_window() returns a pointer just past the dwords it
	 * stored -- confirm against its definition.
	 */
	iter_reg = fw->host_risc_reg;
	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
	qla24xx_read_window(reg, 0x7010, 16, iter_reg);

	/* PCIe registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);	/* read-back to flush the write */
	WRT_REG_DWORD(&reg->iobase_window, 0x01);
	dmp_reg = &reg->iobase_c4;
	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));

	WRT_REG_DWORD(&reg->iobase_window, 0x00);
	RD_REG_DWORD(&reg->iobase_window);

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));

	/* Disable interrupts. */
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);

	/* Shadow registers: select each bank via iobase_select, read sdata. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	/* RISC I/O register. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));

	/* Mailbox registers (16-bit each). */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	iter_reg = fw->xseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	iter_reg = fw->rseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);

	/* Auxiliary sequence registers. */
	iter_reg = fw->aseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
	qla24xx_read_window(reg, 0xB070, 16, iter_reg);

	iter_reg = fw->aseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);

	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);

	/* Command DMA registers. */
	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);

	/* Queues: 8 dwords via window, then 7 more from iobase_q. */
	iter_reg = fw->req0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->resp0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->req1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
	qla24xx_read_window(reg, 0x3070, 16, iter_reg);

	/* Fibre Protocol Module registers (ISP81xx has 0x40C0/0x40D0 too). */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40D0, 16, iter_reg);

	/* Frame Buffer registers (ISP81xx adds window 0x61C0). */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);

	/* Multi queue registers */
	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
	    &last_chain);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS)
		goto qla81xx_fw_dump_failed_0;

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla81xx_fw_dump_failed_0;

	nxt = qla2xxx_copy_queues(ha, nxt);

	nxt = qla24xx_copy_eft(ha, nxt);

	/* Chain entries -- started with MQ. */
	qla25xx_copy_fce(ha, nxt_chain, &last_chain);
	if (last_chain) {
		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
	}

qla81xx_fw_dump_failed_0:
	qla2xxx_dump_post_process(base_vha, rval);

qla81xx_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
1616 /****************************************************************************/
1617 /* Driver Debug Functions. */
1618 /****************************************************************************/
1620 void
1621 qla2x00_dump_regs(scsi_qla_host_t *vha)
1623 int i;
1624 struct qla_hw_data *ha = vha->hw;
1625 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1626 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
1627 uint16_t __iomem *mbx_reg;
1629 mbx_reg = IS_FWI2_CAPABLE(ha) ? &reg24->mailbox0:
1630 MAILBOX_REG(ha, reg, 0);
1632 printk("Mailbox registers:\n");
1633 for (i = 0; i < 6; i++)
1634 printk("scsi(%ld): mbox %d 0x%04x \n", vha->host_no, i,
1635 RD_REG_WORD(mbx_reg++));
1639 void
1640 qla2x00_dump_buffer(uint8_t * b, uint32_t size)
1642 uint32_t cnt;
1643 uint8_t c;
1645 printk(" 0 1 2 3 4 5 6 7 8 9 "
1646 "Ah Bh Ch Dh Eh Fh\n");
1647 printk("----------------------------------------"
1648 "----------------------\n");
1650 for (cnt = 0; cnt < size;) {
1651 c = *b++;
1652 printk("%02x",(uint32_t) c);
1653 cnt++;
1654 if (!(cnt % 16))
1655 printk("\n");
1656 else
1657 printk(" ");
1659 if (cnt % 16)
1660 printk("\n");