[SCSI] qla2xxx: Refactor qla data structures
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] drivers/scsi/qla2xxx/qla_isr.c
blob a76efd99d0070fa7d61ee43a5c8d2c539c5db3c6
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
9 #include <linux/delay.h>
10 #include <scsi/scsi_tcq.h>
12 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13 static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
14 static void qla2x00_status_entry(scsi_qla_host_t *, void *);
15 static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
16 static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
18 /**
19 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
20 * @irq: interrupt number
21 * @dev_id: SCSI driver HA context
23 * Called by system whenever the host adapter generates an interrupt.
25 * Returns handled flag.
27 irqreturn_t
28 qla2100_intr_handler(int irq, void *dev_id)
30 scsi_qla_host_t *ha;
31 struct device_reg_2xxx __iomem *reg;
32 int status;
33 unsigned long iter;
34 uint16_t hccr;
35 uint16_t mb[4];
37 ha = (scsi_qla_host_t *) dev_id;
38 if (!ha) {
39 printk(KERN_INFO
40 "%s(): NULL host pointer\n", __func__);
41 return (IRQ_NONE);
44 reg = &ha->iobase->isp;
45 status = 0;
47 spin_lock(&ha->hardware_lock);
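	/*
	 * Poll the ISP for work a bounded number of times (50 passes)
	 * so a stuck interrupt source cannot keep the handler spinning
	 * with the hardware lock held.
	 */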
48 for (iter = 50; iter--; ) {
49 hccr = RD_REG_WORD(&reg->hccr);
50 if (hccr & HCCR_RISC_PAUSE) {
51 if (pci_channel_offline(ha->pdev))
52 break;
55 * Issue a "HARD" reset in order for the RISC interrupt
56 * bit to be cleared. Schedule a big hammer to get
57 * out of the RISC PAUSED state.
59 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
60 RD_REG_WORD(&reg->hccr);
62 ha->isp_ops->fw_dump(ha, 1);
63 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
64 break;
65 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
66 break;
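		/*
		 * Semaphore bit 0 set means the RISC has posted mailbox
		 * data (a mailbox command completion or an asynchronous
		 * event); otherwise the interrupt flags new response
		 * queue entries.
		 */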
68 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
69 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
70 RD_REG_WORD(&reg->hccr);
72 /* Get mailbox data. */
73 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
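			/*
			 * Mailbox 0 values 0x4000-0x7fff indicate a mailbox
			 * command completion; 0x8000-0xbfff indicate an
			 * asynchronous event.
			 */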
74 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
75 qla2x00_mbx_completion(ha, mb[0]);
76 status |= MBX_INTERRUPT;
77 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
78 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
79 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
80 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
81 qla2x00_async_event(ha, mb);
82 } else {
83 /*EMPTY*/
84 DEBUG2(printk("scsi(%ld): Unrecognized "
85 "interrupt type (%d).\n",
86 ha->host_no, mb[0]));
88 /* Release mailbox registers. */
89 WRT_REG_WORD(&reg->semaphore, 0);
90 RD_REG_WORD(&reg->semaphore);
91 } else {
92 qla2x00_process_response_queue(ha);
94 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
95 RD_REG_WORD(&reg->hccr);
98 spin_unlock(&ha->hardware_lock);
100 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
101 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
102 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
103 complete(&ha->mbx_intr_comp);
106 return (IRQ_HANDLED);
110 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
111 * @irq: interrupt number
112 * @dev_id: SCSI driver HA context
114 * Called by system whenever the host adapter generates an interrupt.
116 * Returns handled flag.
118 irqreturn_t
119 qla2300_intr_handler(int irq, void *dev_id)
121 scsi_qla_host_t *ha;
122 struct device_reg_2xxx __iomem *reg;
123 int status;
124 unsigned long iter;
125 uint32_t stat;
126 uint16_t hccr;
127 uint16_t mb[4];
129 ha = (scsi_qla_host_t *) dev_id;
130 if (!ha) {
131 printk(KERN_INFO
132 "%s(): NULL host pointer\n", __func__);
133 return (IRQ_NONE);
136 reg = &ha->iobase->isp;
137 status = 0;
139 spin_lock(&ha->hardware_lock);
140 for (iter = 50; iter--; ) {
141 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
142 if (stat & HSR_RISC_PAUSED) {
143 if (pci_channel_offline(ha->pdev))
144 break;
146 hccr = RD_REG_WORD(&reg->hccr);
147 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
148 qla_printk(KERN_INFO, ha, "Parity error -- "
149 "HCCR=%x, Dumping firmware!\n", hccr);
150 else
151 qla_printk(KERN_INFO, ha, "RISC paused -- "
152 "HCCR=%x, Dumping firmware!\n", hccr);
155 * Issue a "HARD" reset in order for the RISC
156 * interrupt bit to be cleared. Schedule a big
157 * hammer to get out of the RISC PAUSED state.
159 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
160 RD_REG_WORD(&reg->hccr);
162 ha->isp_ops->fw_dump(ha, 1);
163 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
164 break;
165 } else if ((stat & HSR_RISC_INT) == 0)
166 break;
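		/*
		 * The low byte of the host status register encodes the
		 * interrupt reason; the upper 16 bits carry the first
		 * mailbox value for that interrupt type.
		 */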
168 switch (stat & 0xff) {
169 case 0x1:
170 case 0x2:
171 case 0x10:
172 case 0x11:
173 qla2x00_mbx_completion(ha, MSW(stat));
174 status |= MBX_INTERRUPT;
176 /* Release mailbox registers. */
177 WRT_REG_WORD(&reg->semaphore, 0);
178 break;
179 case 0x12:
180 mb[0] = MSW(stat);
181 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
182 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
183 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
184 qla2x00_async_event(ha, mb);
185 break;
186 case 0x13:
187 qla2x00_process_response_queue(ha);
188 break;
189 case 0x15:
190 mb[0] = MBA_CMPLT_1_16BIT;
191 mb[1] = MSW(stat);
192 qla2x00_async_event(ha, mb);
193 break;
194 case 0x16:
195 mb[0] = MBA_SCSI_COMPLETION;
196 mb[1] = MSW(stat);
197 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
198 qla2x00_async_event(ha, mb);
199 break;
200 default:
201 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
202 "(%d).\n",
203 ha->host_no, stat & 0xff));
204 break;
206 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
207 RD_REG_WORD_RELAXED(&reg->hccr);
209 spin_unlock(&ha->hardware_lock);
211 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
212 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
213 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
214 complete(&ha->mbx_intr_comp);
217 return (IRQ_HANDLED);
221 * qla2x00_mbx_completion() - Process mailbox command completions.
222 * @ha: SCSI driver HA context
223 * @mb0: Mailbox0 register
225 static void
226 qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
228 uint16_t cnt;
229 uint16_t __iomem *wptr;
230 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
232 /* Load return mailbox registers. */
233 ha->flags.mbox_int = 1;
234 ha->mailbox_out[0] = mb0;
235 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
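	/*
	 * Read back the remaining mailbox registers.  The ISP2200 maps
	 * mailboxes 8 and above at a different offset, and mailboxes 4
	 * and 5 are read through the debounce helper (re-read until two
	 * successive reads agree).
	 */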
237 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
238 if (IS_QLA2200(ha) && cnt == 8)
239 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
240 if (cnt == 4 || cnt == 5)
241 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
242 else
243 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
245 wptr++;
248 if (ha->mcp) {
249 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
250 __func__, ha->host_no, ha->mcp->mb[0]));
251 } else {
252 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
253 __func__, ha->host_no));
258 * qla2x00_async_event() - Process asynchronous events.
259 * @ha: SCSI driver HA context
260 * @mb: Mailbox registers (0 - 3)
262 void
263 qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
265 #define LS_UNKNOWN 2
266 static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
267 char *link_speed;
268 uint16_t handle_cnt;
269 uint16_t cnt;
270 uint32_t handles[5];
271 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
272 uint32_t rscn_entry, host_pid;
273 uint8_t rscn_queue_index;
274 unsigned long flags;
276 /* Setup to process RIO completion. */
277 handle_cnt = 0;
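	/*
	 * The RIO (reduced interrupt operation) completion variants pack
	 * up to five command handles into the mailbox registers; they are
	 * normalized here to MBA_SCSI_COMPLETION with handles[] filled in.
	 */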
278 switch (mb[0]) {
279 case MBA_SCSI_COMPLETION:
280 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
281 handle_cnt = 1;
282 break;
283 case MBA_CMPLT_1_16BIT:
284 handles[0] = mb[1];
285 handle_cnt = 1;
286 mb[0] = MBA_SCSI_COMPLETION;
287 break;
288 case MBA_CMPLT_2_16BIT:
289 handles[0] = mb[1];
290 handles[1] = mb[2];
291 handle_cnt = 2;
292 mb[0] = MBA_SCSI_COMPLETION;
293 break;
294 case MBA_CMPLT_3_16BIT:
295 handles[0] = mb[1];
296 handles[1] = mb[2];
297 handles[2] = mb[3];
298 handle_cnt = 3;
299 mb[0] = MBA_SCSI_COMPLETION;
300 break;
301 case MBA_CMPLT_4_16BIT:
302 handles[0] = mb[1];
303 handles[1] = mb[2];
304 handles[2] = mb[3];
305 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
306 handle_cnt = 4;
307 mb[0] = MBA_SCSI_COMPLETION;
308 break;
309 case MBA_CMPLT_5_16BIT:
310 handles[0] = mb[1];
311 handles[1] = mb[2];
312 handles[2] = mb[3];
313 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
314 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
315 handle_cnt = 5;
316 mb[0] = MBA_SCSI_COMPLETION;
317 break;
318 case MBA_CMPLT_2_32BIT:
319 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
320 handles[1] = le32_to_cpu(
321 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
322 RD_MAILBOX_REG(ha, reg, 6));
323 handle_cnt = 2;
324 mb[0] = MBA_SCSI_COMPLETION;
325 break;
326 default:
327 break;
330 switch (mb[0]) {
331 case MBA_SCSI_COMPLETION: /* Fast Post */
332 if (!ha->flags.online)
333 break;
335 for (cnt = 0; cnt < handle_cnt; cnt++)
336 qla2x00_process_completed_request(ha, handles[cnt]);
337 break;
339 case MBA_RESET: /* Reset */
340 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no));
342 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
343 break;
345 case MBA_SYSTEM_ERR: /* System Error */
346 qla_printk(KERN_INFO, ha,
347 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
348 mb[1], mb[2], mb[3]);
350 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
351 ha->isp_ops->fw_dump(ha, 1);
353 if (IS_FWI2_CAPABLE(ha)) {
354 if (mb[1] == 0 && mb[2] == 0) {
355 qla_printk(KERN_ERR, ha,
356 "Unrecoverable Hardware Error: adapter "
357 "marked OFFLINE!\n");
358 ha->flags.online = 0;
359 } else
360 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
361 } else if (mb[1] == 0) {
362 qla_printk(KERN_INFO, ha,
363 "Unrecoverable Hardware Error: adapter marked "
364 "OFFLINE!\n");
365 ha->flags.online = 0;
366 } else
367 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
368 break;
370 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
371 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
372 ha->host_no));
373 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
375 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
376 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
377 break;
379 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
380 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
381 ha->host_no));
382 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
384 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
385 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
386 break;
388 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
389 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
390 ha->host_no));
391 break;
393 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
394 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
395 mb[1]));
396 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
398 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
399 atomic_set(&ha->loop_state, LOOP_DOWN);
400 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
401 qla2x00_mark_all_devices_lost(ha, 1);
404 if (ha->parent) {
405 atomic_set(&ha->vp_state, VP_FAILED);
406 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
409 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
410 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
412 ha->flags.management_server_logged_in = 0;
413 qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]);
414 break;
416 case MBA_LOOP_UP: /* Loop Up Event */
417 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
418 link_speed = link_speeds[0];
419 ha->link_data_rate = PORT_SPEED_1GB;
420 } else {
421 link_speed = link_speeds[LS_UNKNOWN];
422 if (mb[1] < 5)
423 link_speed = link_speeds[mb[1]];
424 ha->link_data_rate = mb[1];
427 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
428 ha->host_no, link_speed));
429 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
430 link_speed);
432 ha->flags.management_server_logged_in = 0;
433 qla2x00_post_aen_work(ha, FCH_EVT_LINKUP, ha->link_data_rate);
434 break;
436 case MBA_LOOP_DOWN: /* Loop Down Event */
437 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
438 "(%x %x %x).\n", ha->host_no, mb[1], mb[2], mb[3]));
439 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
440 mb[1], mb[2], mb[3]);
442 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
443 atomic_set(&ha->loop_state, LOOP_DOWN);
444 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
445 ha->device_flags |= DFLG_NO_CABLE;
446 qla2x00_mark_all_devices_lost(ha, 1);
449 if (ha->parent) {
450 atomic_set(&ha->vp_state, VP_FAILED);
451 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
454 ha->flags.management_server_logged_in = 0;
455 ha->link_data_rate = PORT_SPEED_UNKNOWN;
456 qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0);
457 break;
459 case MBA_LIP_RESET: /* LIP reset occurred */
460 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
461 ha->host_no, mb[1]));
462 qla_printk(KERN_INFO, ha,
463 "LIP reset occurred (%x).\n", mb[1]);
465 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
466 atomic_set(&ha->loop_state, LOOP_DOWN);
467 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
468 qla2x00_mark_all_devices_lost(ha, 1);
471 if (ha->parent) {
472 atomic_set(&ha->vp_state, VP_FAILED);
473 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
476 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
478 ha->operating_mode = LOOP;
479 ha->flags.management_server_logged_in = 0;
480 qla2x00_post_aen_work(ha, FCH_EVT_LIPRESET, mb[1]);
481 break;
483 case MBA_POINT_TO_POINT: /* Point-to-Point */
484 if (IS_QLA2100(ha))
485 break;
487 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
488 ha->host_no));
491 * Until there's a transition from loop down to loop up, treat
492 * this as loop down only.
494 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
495 atomic_set(&ha->loop_state, LOOP_DOWN);
496 if (!atomic_read(&ha->loop_down_timer))
497 atomic_set(&ha->loop_down_timer,
498 LOOP_DOWN_TIME);
499 qla2x00_mark_all_devices_lost(ha, 1);
502 if (ha->parent) {
503 atomic_set(&ha->vp_state, VP_FAILED);
504 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
507 if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
508 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
510 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
511 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
513 ha->flags.gpsc_supported = 1;
514 ha->flags.management_server_logged_in = 0;
515 break;
517 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
518 if (IS_QLA2100(ha))
519 break;
521 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
522 "received.\n",
523 ha->host_no));
524 qla_printk(KERN_INFO, ha,
525 "Configuration change detected: value=%x.\n", mb[1]);
527 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
528 atomic_set(&ha->loop_state, LOOP_DOWN);
529 if (!atomic_read(&ha->loop_down_timer))
530 atomic_set(&ha->loop_down_timer,
531 LOOP_DOWN_TIME);
532 qla2x00_mark_all_devices_lost(ha, 1);
535 if (ha->parent) {
536 atomic_set(&ha->vp_state, VP_FAILED);
537 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
540 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
541 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
542 break;
544 case MBA_PORT_UPDATE: /* Port database update */
546 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
547 * event etc. earlier indicating loop is down) then process
548 * it.  Otherwise ignore it and wait for an RSCN to come in.
550 atomic_set(&ha->loop_down_timer, 0);
551 if (atomic_read(&ha->loop_state) != LOOP_DOWN &&
552 atomic_read(&ha->loop_state) != LOOP_DEAD) {
553 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
554 "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1],
555 mb[2], mb[3]));
556 break;
559 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
560 ha->host_no));
561 DEBUG(printk(KERN_INFO
562 "scsi(%ld): Port database changed %04x %04x %04x.\n",
563 ha->host_no, mb[1], mb[2], mb[3]));
566 * Mark all devices as missing so we will login again.
568 atomic_set(&ha->loop_state, LOOP_UP);
570 qla2x00_mark_all_devices_lost(ha, 1);
572 ha->flags.rscn_queue_overflow = 1;
574 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
575 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
576 break;
578 case MBA_RSCN_UPDATE: /* State Change Registration */
579 /* Check if the Vport has issued a SCR */
580 if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags))
581 break;
582 /* Only handle SCNs for our Vport index. */
583 if (ha->parent && ha->vp_idx != (mb[3] & 0xff))
584 break;
586 DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
587 ha->host_no));
588 DEBUG(printk(KERN_INFO
589 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
590 ha->host_no, mb[1], mb[2], mb[3]));
592 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
593 host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
594 ha->d_id.b.al_pa;
595 if (rscn_entry == host_pid) {
596 DEBUG(printk(KERN_INFO
597 "scsi(%ld): Ignoring RSCN update to local host "
598 "port ID (%06x)\n",
599 ha->host_no, host_pid));
600 break;
603 /* Ignore reserved bits from RSCN-payload. */
604 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
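		/*
		 * Queue the affected-port entry in the driver's circular
		 * RSCN buffer; on overflow, set the overflow flag so the
		 * fabric is rediscovered in full rather than per entry.
		 */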
605 rscn_queue_index = ha->rscn_in_ptr + 1;
606 if (rscn_queue_index == MAX_RSCN_COUNT)
607 rscn_queue_index = 0;
608 if (rscn_queue_index != ha->rscn_out_ptr) {
609 ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry;
610 ha->rscn_in_ptr = rscn_queue_index;
611 } else {
612 ha->flags.rscn_queue_overflow = 1;
615 atomic_set(&ha->loop_state, LOOP_UPDATE);
616 atomic_set(&ha->loop_down_timer, 0);
617 ha->flags.management_server_logged_in = 0;
619 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
620 set_bit(RSCN_UPDATE, &ha->dpc_flags);
621 qla2x00_post_aen_work(ha, FCH_EVT_RSCN, rscn_entry);
622 break;
624 /* case MBA_RIO_RESPONSE: */
625 case MBA_ZIO_RESPONSE:
626 DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
627 ha->host_no));
628 DEBUG(printk(KERN_INFO
629 "scsi(%ld): [R|Z]IO update completion.\n",
630 ha->host_no));
632 if (IS_FWI2_CAPABLE(ha))
633 qla24xx_process_response_queue(ha);
634 else
635 qla2x00_process_response_queue(ha);
636 break;
638 case MBA_DISCARD_RND_FRAME:
639 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
640 "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
641 break;
643 case MBA_TRACE_NOTIFICATION:
644 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
645 ha->host_no, mb[1], mb[2]));
646 break;
648 case MBA_ISP84XX_ALERT:
649 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
650 "%04x %04x %04x\n", ha->host_no, mb[1], mb[2], mb[3]));
652 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
653 switch (mb[1]) {
654 case A84_PANIC_RECOVERY:
655 qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
656 "%04x %04x\n", mb[2], mb[3]);
657 break;
658 case A84_OP_LOGIN_COMPLETE:
659 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
660 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
661 "firmware version %x\n", ha->cs84xx->op_fw_version));
662 break;
663 case A84_DIAG_LOGIN_COMPLETE:
664 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
665 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
666 "diagnostic firmware version %x\n",
667 ha->cs84xx->diag_fw_version));
668 break;
669 case A84_GOLD_LOGIN_COMPLETE:
670 ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
671 ha->cs84xx->fw_update = 1;
672 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
673 "firmware version %x\n",
674 ha->cs84xx->gold_fw_version));
675 break;
676 default:
677 qla_printk(KERN_ERR, ha,
678 "Alert 84xx: Invalid Alert %04x %04x %04x\n",
679 mb[1], mb[2], mb[3]);
681 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
682 break;
685 if (!ha->parent && ha->num_vhosts)
686 qla2x00_alert_all_vps(ha, mb);
689 static void
690 qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
692 fc_port_t *fcport = data;
694 if (fcport->ha->max_q_depth <= sdev->queue_depth)
695 return;
697 if (sdev->ordered_tags)
698 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
699 sdev->queue_depth + 1);
700 else
701 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
702 sdev->queue_depth + 1);
704 fcport->last_ramp_up = jiffies;
706 DEBUG2(qla_printk(KERN_INFO, fcport->ha,
707 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
708 fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
709 sdev->queue_depth));
712 static void
713 qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
715 fc_port_t *fcport = data;
717 if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
718 return;
720 DEBUG2(qla_printk(KERN_INFO, fcport->ha,
721 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
722 fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
723 sdev->queue_depth));
726 static inline void
727 qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
729 fc_port_t *fcport;
730 struct scsi_device *sdev;
732 sdev = sp->cmd->device;
733 if (sdev->queue_depth >= ha->max_q_depth)
734 return;
736 fcport = sp->fcport;
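	/*
	 * Throttle ramp-up: only increase queue depth once
	 * ql2xqfullrampup seconds have passed since both the last
	 * ramp-up and the last QUEUE FULL condition on this port.
	 */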
737 if (time_before(jiffies,
738 fcport->last_ramp_up + ql2xqfullrampup * HZ))
739 return;
740 if (time_before(jiffies,
741 fcport->last_queue_full + ql2xqfullrampup * HZ))
742 return;
744 starget_for_each_device(sdev->sdev_target, fcport,
745 qla2x00_adjust_sdev_qdepth_up);
749 * qla2x00_process_completed_request() - Process a Fast Post response.
750 * @ha: SCSI driver HA context
751 * @index: SRB index
753 static void
754 qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
756 srb_t *sp;
758 /* Validate handle. */
759 if (index >= MAX_OUTSTANDING_COMMANDS) {
760 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
761 ha->host_no, index));
762 qla_printk(KERN_WARNING, ha,
763 "Invalid SCSI completion handle %d.\n", index);
765 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
766 return;
769 sp = ha->outstanding_cmds[index];
770 if (sp) {
771 /* Free outstanding command slot. */
772 ha->outstanding_cmds[index] = NULL;
774 CMD_COMPL_STATUS(sp->cmd) = 0L;
775 CMD_SCSI_STATUS(sp->cmd) = 0L;
777 /* Save ISP completion status */
778 sp->cmd->result = DID_OK << 16;
780 qla2x00_ramp_up_queue_depth(ha, sp);
781 qla2x00_sp_compl(ha, sp);
782 } else {
783 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
784 ha->host_no));
785 qla_printk(KERN_WARNING, ha,
786 "Invalid ISP SCSI completion handle\n");
788 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
793 * qla2x00_process_response_queue() - Process response queue entries.
794 * @ha: SCSI driver HA context
796 void
797 qla2x00_process_response_queue(struct scsi_qla_host *ha)
799 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
800 sts_entry_t *pkt;
801 uint16_t handle_cnt;
802 uint16_t cnt;
804 if (!ha->flags.online)
805 return;
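	/*
	 * Walk the response ring until an entry still carrying the
	 * RESPONSE_PROCESSED signature is reached; each consumed entry
	 * is re-signed below so it is not processed twice.
	 */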
807 while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
808 pkt = (sts_entry_t *)ha->response_ring_ptr;
810 ha->rsp_ring_index++;
811 if (ha->rsp_ring_index == ha->response_q_length) {
812 ha->rsp_ring_index = 0;
813 ha->response_ring_ptr = ha->response_ring;
814 } else {
815 ha->response_ring_ptr++;
818 if (pkt->entry_status != 0) {
819 DEBUG3(printk(KERN_INFO
820 "scsi(%ld): Process error entry.\n", ha->host_no));
822 qla2x00_error_entry(ha, pkt);
823 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
824 wmb();
825 continue;
828 switch (pkt->entry_type) {
829 case STATUS_TYPE:
830 qla2x00_status_entry(ha, pkt);
831 break;
832 case STATUS_TYPE_21:
833 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
834 for (cnt = 0; cnt < handle_cnt; cnt++) {
835 qla2x00_process_completed_request(ha,
836 ((sts21_entry_t *)pkt)->handle[cnt]);
838 break;
839 case STATUS_TYPE_22:
840 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
841 for (cnt = 0; cnt < handle_cnt; cnt++) {
842 qla2x00_process_completed_request(ha,
843 ((sts22_entry_t *)pkt)->handle[cnt]);
845 break;
846 case STATUS_CONT_TYPE:
847 qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
848 break;
849 default:
850 /* Type Not Supported. */
851 DEBUG4(printk(KERN_WARNING
852 "scsi(%ld): Received unknown response pkt type %x "
853 "entry status=%x.\n",
854 ha->host_no, pkt->entry_type, pkt->entry_status));
855 break;
857 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
858 wmb();
861 /* Adjust ring index */
862 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index);
865 static inline void
866 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
868 struct scsi_cmnd *cp = sp->cmd;
870 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
871 sense_len = SCSI_SENSE_BUFFERSIZE;
873 CMD_ACTUAL_SNSLEN(cp) = sense_len;
874 sp->request_sense_length = sense_len;
875 sp->request_sense_ptr = cp->sense_buffer;
876 if (sp->request_sense_length > 32)
877 sense_len = 32;
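	/*
	 * At most 32 bytes of sense data are copied from the status
	 * IOCB; any remainder arrives in status continuation entries and
	 * is copied by qla2x00_status_cont_entry() via ha->status_srb.
	 */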
879 memcpy(cp->sense_buffer, sense_data, sense_len);
881 sp->request_sense_ptr += sense_len;
882 sp->request_sense_length -= sense_len;
883 if (sp->request_sense_length != 0)
884 sp->fcport->ha->status_srb = sp;
886 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
887 "cmd=%p pid=%ld\n", __func__, sp->fcport->ha->host_no,
888 cp->device->channel, cp->device->id, cp->device->lun, cp,
889 cp->serial_number));
890 if (sense_len)
891 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
892 CMD_ACTUAL_SNSLEN(cp)));
896 * qla2x00_status_entry() - Process a Status IOCB entry.
897 * @ha: SCSI driver HA context
898 * @pkt: Entry pointer
900 static void
901 qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
903 srb_t *sp;
904 fc_port_t *fcport;
905 struct scsi_cmnd *cp;
906 sts_entry_t *sts;
907 struct sts_entry_24xx *sts24;
908 uint16_t comp_status;
909 uint16_t scsi_status;
910 uint8_t lscsi_status;
911 int32_t resid;
912 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
913 uint8_t *rsp_info, *sense_data;
915 sts = (sts_entry_t *) pkt;
916 sts24 = (struct sts_entry_24xx *) pkt;
917 if (IS_FWI2_CAPABLE(ha)) {
918 comp_status = le16_to_cpu(sts24->comp_status);
919 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
920 } else {
921 comp_status = le16_to_cpu(sts->comp_status);
922 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
925 /* Fast path completion. */
926 if (comp_status == CS_COMPLETE && scsi_status == 0) {
927 qla2x00_process_completed_request(ha, sts->handle);
929 return;
932 /* Validate handle. */
933 if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
934 sp = ha->outstanding_cmds[sts->handle];
935 ha->outstanding_cmds[sts->handle] = NULL;
936 } else
937 sp = NULL;
939 if (sp == NULL) {
940 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
941 ha->host_no));
942 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
944 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
945 qla2xxx_wake_dpc(ha);
946 return;
948 cp = sp->cmd;
949 if (cp == NULL) {
950 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
951 "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp));
952 qla_printk(KERN_WARNING, ha,
953 "Command is NULL: already returned to OS (sp=%p)\n", sp);
955 return;
958 lscsi_status = scsi_status & STATUS_MASK;
959 CMD_ENTRY_STATUS(cp) = sts->entry_status;
960 CMD_COMPL_STATUS(cp) = comp_status;
961 CMD_SCSI_STATUS(cp) = scsi_status;
963 fcport = sp->fcport;
965 sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
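	/*
	 * FWI2-capable ISPs (24xx and later) report 32-bit lengths and
	 * carry both response info and sense data in the FCP-swapped
	 * data buffer; older ISPs use the legacy status IOCB layout.
	 */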
966 if (IS_FWI2_CAPABLE(ha)) {
967 sense_len = le32_to_cpu(sts24->sense_len);
968 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
969 resid_len = le32_to_cpu(sts24->rsp_residual_count);
970 fw_resid_len = le32_to_cpu(sts24->residual_len);
971 rsp_info = sts24->data;
972 sense_data = sts24->data;
973 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
974 } else {
975 sense_len = le16_to_cpu(sts->req_sense_length);
976 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
977 resid_len = le32_to_cpu(sts->residual_length);
978 rsp_info = sts->rsp_info;
979 sense_data = sts->req_sense_data;
982 /* Check for any FCP transport errors. */
983 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
984 /* Sense data lies beyond any FCP RESPONSE data. */
985 if (IS_FWI2_CAPABLE(ha))
986 sense_data += rsp_info_len;
987 if (rsp_info_len > 3 && rsp_info[3]) {
988 DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
989 "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
990 "retrying command\n", ha->host_no,
991 cp->device->channel, cp->device->id,
992 cp->device->lun, rsp_info_len, rsp_info[0],
993 rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
994 rsp_info[5], rsp_info[6], rsp_info[7]));
996 cp->result = DID_BUS_BUSY << 16;
997 qla2x00_sp_compl(ha, sp);
998 return;
1002 /* Check for overrun. */
1003 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1004 scsi_status & SS_RESIDUAL_OVER)
1005 comp_status = CS_DATA_OVERRUN;
1008 * Based on Host and scsi status generate status code for Linux
1010 switch (comp_status) {
1011 case CS_COMPLETE:
1012 case CS_QUEUE_FULL:
1013 if (scsi_status == 0) {
1014 cp->result = DID_OK << 16;
1015 break;
1017 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1018 resid = resid_len;
1019 scsi_set_resid(cp, resid);
1020 CMD_RESID_LEN(cp) = resid;
1022 if (!lscsi_status &&
1023 ((unsigned)(scsi_bufflen(cp) - resid) <
1024 cp->underflow)) {
1025 qla_printk(KERN_INFO, ha,
1026 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1027 "detected (%x of %x bytes)...returning "
1028 "error status.\n", ha->host_no,
1029 cp->device->channel, cp->device->id,
1030 cp->device->lun, resid,
1031 scsi_bufflen(cp));
1033 cp->result = DID_ERROR << 16;
1034 break;
1037 cp->result = DID_OK << 16 | lscsi_status;
1039 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1040 DEBUG2(printk(KERN_INFO
1041 "scsi(%ld): QUEUE FULL status detected "
1042 "0x%x-0x%x.\n", ha->host_no, comp_status,
1043 scsi_status));
1045 /* Adjust queue depth for all luns on the port. */
1046 fcport->last_queue_full = jiffies;
1047 starget_for_each_device(cp->device->sdev_target,
1048 fcport, qla2x00_adjust_sdev_qdepth_down);
1049 break;
1051 if (lscsi_status != SS_CHECK_CONDITION)
1052 break;
1054 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1055 if (!(scsi_status & SS_SENSE_LEN_VALID))
1056 break;
1058 qla2x00_handle_sense(sp, sense_data, sense_len);
1059 break;
1061 case CS_DATA_UNDERRUN:
1062 resid = resid_len;
1063 /* Use F/W calculated residual length. */
1064 if (IS_FWI2_CAPABLE(ha)) {
1065 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1066 lscsi_status = 0;
1067 } else if (resid != fw_resid_len) {
1068 scsi_status &= ~SS_RESIDUAL_UNDER;
1069 lscsi_status = 0;
1071 resid = fw_resid_len;
1074 if (scsi_status & SS_RESIDUAL_UNDER) {
1075 scsi_set_resid(cp, resid);
1076 CMD_RESID_LEN(cp) = resid;
1077 } else {
1078 DEBUG2(printk(KERN_INFO
1079 "scsi(%ld:%d:%d) UNDERRUN status detected "
1080 "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1081 "os_underflow=0x%x\n", ha->host_no,
1082 cp->device->id, cp->device->lun, comp_status,
1083 scsi_status, resid_len, resid, cp->cmnd[0],
1084 cp->underflow));
1089 * Check to see if SCSI Status is non zero. If so report SCSI
1090 * Status.
1092 if (lscsi_status != 0) {
1093 cp->result = DID_OK << 16 | lscsi_status;
1095 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1096 DEBUG2(printk(KERN_INFO
1097 "scsi(%ld): QUEUE FULL status detected "
1098 "0x%x-0x%x.\n", ha->host_no, comp_status,
1099 scsi_status));
1102 * Adjust queue depth for all luns on the
1103 * port.
1105 fcport->last_queue_full = jiffies;
1106 starget_for_each_device(
1107 cp->device->sdev_target, fcport,
1108 qla2x00_adjust_sdev_qdepth_down);
1109 break;
1111 if (lscsi_status != SS_CHECK_CONDITION)
1112 break;
1114 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1115 if (!(scsi_status & SS_SENSE_LEN_VALID))
1116 break;
1118 qla2x00_handle_sense(sp, sense_data, sense_len);
1119 } else {
1121 * If RISC reports underrun and target does not report
1122 * it then we must have a lost frame, so tell upper
1123 * layer to retry it by reporting a bus busy.
1125 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1126 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1127 "frame(s) detected (%x of %x bytes)..."
1128 "retrying command.\n", ha->host_no,
1129 cp->device->channel, cp->device->id,
1130 cp->device->lun, resid,
1131 scsi_bufflen(cp)));
1133 cp->result = DID_BUS_BUSY << 16;
1134 break;
1137 /* Handle mid-layer underflow */
1138 if ((unsigned)(scsi_bufflen(cp) - resid) <
1139 cp->underflow) {
1140 qla_printk(KERN_INFO, ha,
1141 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1142 "detected (%x of %x bytes)...returning "
1143 "error status.\n", ha->host_no,
1144 cp->device->channel, cp->device->id,
1145 cp->device->lun, resid,
1146 scsi_bufflen(cp));
1148 cp->result = DID_ERROR << 16;
1149 break;
1152 /* Everybody online, looking good... */
1153 cp->result = DID_OK << 16;
1155 break;
1157 case CS_DATA_OVERRUN:
1158 DEBUG2(printk(KERN_INFO
1159 "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1160 ha->host_no, cp->device->id, cp->device->lun, comp_status,
1161 scsi_status));
1162 DEBUG2(printk(KERN_INFO
1163 "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1164 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1165 cp->cmnd[4], cp->cmnd[5]));
1166 DEBUG2(printk(KERN_INFO
1167 "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1168 "status!\n",
1169 cp->serial_number, scsi_bufflen(cp), resid_len));
1171 cp->result = DID_ERROR << 16;
1172 break;
1174 case CS_PORT_LOGGED_OUT:
1175 case CS_PORT_CONFIG_CHG:
1176 case CS_PORT_BUSY:
1177 case CS_INCOMPLETE:
1178 case CS_PORT_UNAVAILABLE:
1180 * If the port is in Target Down state, return all IOs for this
1181 * Target with DID_NO_CONNECT ELSE Queue the IOs in the
1182 * retry_queue.
1184 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1185 "pid=%ld, compl status=0x%x, port state=0x%x\n",
1186 ha->host_no, cp->device->id, cp->device->lun,
1187 cp->serial_number, comp_status,
1188 atomic_read(&fcport->state)));
1191 * We are going to have the fc class block the rport
1192 * while we try to recover so instruct the mid layer
1193 * to requeue until the class decides how to handle this.
1195 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1196 if (atomic_read(&fcport->state) == FCS_ONLINE)
1197 qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1);
1198 break;
1200 case CS_RESET:
1201 DEBUG2(printk(KERN_INFO
1202 "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1203 ha->host_no, comp_status, scsi_status));
1205 cp->result = DID_RESET << 16;
1206 break;
1208 case CS_ABORTED:
1210 * hv2.19.12 - DID_ABORT does not retry the request if we
1211 * aborted this request then abort otherwise it must be a
1212 * reset.
1214 DEBUG2(printk(KERN_INFO
1215 "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1216 ha->host_no, comp_status, scsi_status));
1218 cp->result = DID_RESET << 16;
1219 break;
1221 case CS_TIMEOUT:
1223 * We are going to have the fc class block the rport
1224 * while we try to recover so instruct the mid layer
1225 * to requeue until the class decides how to handle this.
1227 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1229 if (IS_FWI2_CAPABLE(ha)) {
1230 DEBUG2(printk(KERN_INFO
1231 "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1232 "0x%x-0x%x\n", ha->host_no, cp->device->channel,
1233 cp->device->id, cp->device->lun, comp_status,
1234 scsi_status));
1235 break;
1237 DEBUG2(printk(KERN_INFO
1238 "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1239 "sflags=%x.\n", ha->host_no, cp->device->channel,
1240 cp->device->id, cp->device->lun, comp_status, scsi_status,
1241 le16_to_cpu(sts->status_flags)));
1243 /* Check to see if logout occurred. */
1244 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1245 qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1);
1246 break;
1248 default:
1249 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1250 "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status));
1251 qla_printk(KERN_INFO, ha,
1252 "Unknown status detected 0x%x-0x%x.\n",
1253 comp_status, scsi_status);
1255 cp->result = DID_ERROR << 16;
1256 break;
1259 /* Place command on done queue. */
1260 if (ha->status_srb == NULL)
1261 qla2x00_sp_compl(ha, sp);
1265 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
1266 * @ha: SCSI driver HA context
1267 * @pkt: Entry pointer
1269 * Extended sense data.
1271 static void
1272 qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1274 uint8_t sense_sz = 0;
1275 srb_t *sp = ha->status_srb;
1276 struct scsi_cmnd *cp;
1278 if (sp != NULL && sp->request_sense_length != 0) {
1279 cp = sp->cmd;
1280 if (cp == NULL) {
1281 DEBUG2(printk("%s(): Cmd already returned back to OS "
1282 "sp=%p.\n", __func__, sp));
1283 qla_printk(KERN_INFO, ha,
1284 "cmd is NULL: already returned to OS (sp=%p)\n",
1285 sp);
1287 ha->status_srb = NULL;
1288 return;
1291 if (sp->request_sense_length > sizeof(pkt->data)) {
1292 sense_sz = sizeof(pkt->data);
1293 } else {
1294 sense_sz = sp->request_sense_length;
1297 /* Move sense data. */
1298 if (IS_FWI2_CAPABLE(ha))
1299 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1300 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1301 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1303 sp->request_sense_ptr += sense_sz;
1304 sp->request_sense_length -= sense_sz;
1306 /* Place command on done queue. */
1307 if (sp->request_sense_length == 0) {
1308 ha->status_srb = NULL;
1309 qla2x00_sp_compl(ha, sp);
1315 * qla2x00_error_entry() - Process an error entry.
1316 * @ha: SCSI driver HA context
1317 * @pkt: Entry pointer
1319 static void
1320 qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1322 srb_t *sp;
1324 #if defined(QL_DEBUG_LEVEL_2)
1325 if (pkt->entry_status & RF_INV_E_ORDER)
1326 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1327 else if (pkt->entry_status & RF_INV_E_COUNT)
1328 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1329 else if (pkt->entry_status & RF_INV_E_PARAM)
1330 qla_printk(KERN_ERR, ha,
1331 "%s: Invalid Entry Parameter\n", __func__);
1332 else if (pkt->entry_status & RF_INV_E_TYPE)
1333 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1334 else if (pkt->entry_status & RF_BUSY)
1335 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1336 else
1337 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1338 #endif
1340 /* Validate handle. */
1341 if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
1342 sp = ha->outstanding_cmds[pkt->handle];
1343 else
1344 sp = NULL;
1346 if (sp) {
1347 /* Free outstanding command slot. */
1348 ha->outstanding_cmds[pkt->handle] = NULL;
1350 /* Bad payload or header */
1351 if (pkt->entry_status &
1352 (RF_INV_E_ORDER | RF_INV_E_COUNT |
1353 RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1354 sp->cmd->result = DID_ERROR << 16;
1355 } else if (pkt->entry_status & RF_BUSY) {
1356 sp->cmd->result = DID_BUS_BUSY << 16;
1357 } else {
1358 sp->cmd->result = DID_ERROR << 16;
1360 qla2x00_sp_compl(ha, sp);
1362 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1363 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1364 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1365 ha->host_no));
1366 qla_printk(KERN_WARNING, ha,
1367 "Error entry - invalid handle\n");
1369 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1370 qla2xxx_wake_dpc(ha);
1375 * qla24xx_mbx_completion() - Process mailbox command completions.
1376 * @ha: SCSI driver HA context
1377 * @mb0: Mailbox0 register
1379 static void
1380 qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
1382 uint16_t cnt;
1383 uint16_t __iomem *wptr;
1384 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1386 /* Load return mailbox registers. */
1387 ha->flags.mbox_int = 1;
1388 ha->mailbox_out[0] = mb0;
1389 wptr = (uint16_t __iomem *)&reg->mailbox1;
1391 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1392 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1393 wptr++;
1396 if (ha->mcp) {
1397 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1398 __func__, ha->host_no, ha->mcp->mb[0]));
1399 } else {
1400 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1401 __func__, ha->host_no));
1406 * qla24xx_process_response_queue() - Process response queue entries.
1407 * @ha: SCSI driver HA context
1409 void
1410 qla24xx_process_response_queue(struct scsi_qla_host *ha)
1412 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1413 struct sts_entry_24xx *pkt;
1415 if (!ha->flags.online)
1416 return;
1418 while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
1419 pkt = (struct sts_entry_24xx *)ha->response_ring_ptr;
1421 ha->rsp_ring_index++;
1422 if (ha->rsp_ring_index == ha->response_q_length) {
1423 ha->rsp_ring_index = 0;
1424 ha->response_ring_ptr = ha->response_ring;
1425 } else {
1426 ha->response_ring_ptr++;
1429 if (pkt->entry_status != 0) {
1430 DEBUG3(printk(KERN_INFO
1431 "scsi(%ld): Process error entry.\n", ha->host_no));
1433 qla2x00_error_entry(ha, (sts_entry_t *) pkt);
1434 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1435 wmb();
1436 continue;
1439 switch (pkt->entry_type) {
1440 case STATUS_TYPE:
1441 qla2x00_status_entry(ha, pkt);
1442 break;
1443 case STATUS_CONT_TYPE:
1444 qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
1445 break;
1446 case VP_RPT_ID_IOCB_TYPE:
1447 qla24xx_report_id_acquisition(ha,
1448 (struct vp_rpt_id_entry_24xx *)pkt);
1449 break;
1450 default:
1451 /* Type Not Supported. */
1452 DEBUG4(printk(KERN_WARNING
1453 "scsi(%ld): Received unknown response pkt type %x "
1454 "entry status=%x.\n",
1455 ha->host_no, pkt->entry_type, pkt->entry_status));
1456 break;
1458 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1459 wmb();
1462 /* Adjust ring index */
1463 WRT_REG_DWORD(&reg->rsp_q_out, ha->rsp_ring_index);
1466 static void
1467 qla2xxx_check_risc_status(scsi_qla_host_t *ha)
1469 int rval;
1470 uint32_t cnt;
1471 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1473 if (!IS_QLA25XX(ha))
1474 return;
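	/*
	 * Select RISC I/O base window 0x7C00 and poll for the window
	 * select bit -- presumably a diagnostic register check specific
	 * to ISP25xx parts, performed before the firmware is dumped.
	 */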
1476 rval = QLA_SUCCESS;
1477 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1478 RD_REG_DWORD(&reg->iobase_addr);
1479 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1480 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1481 rval == QLA_SUCCESS; cnt--) {
1482 if (cnt) {
1483 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1484 udelay(10);
1485 } else
1486 rval = QLA_FUNCTION_TIMEOUT;
1488 if (rval == QLA_SUCCESS)
1489 goto next_test;
1491 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1492 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1493 rval == QLA_SUCCESS; cnt--) {
1494 if (cnt) {
1495 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1496 udelay(10);
1497 } else
1498 rval = QLA_FUNCTION_TIMEOUT;
1500 if (rval != QLA_SUCCESS)
1501 goto done;
1503 next_test:
1504 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
1505 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
1507 done:
1508 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
1509 RD_REG_DWORD(&reg->iobase_window);
1513 * qla24xx_intr_handler() - Process interrupts for the ISP24xx and later FWI2-capable ISPs.
1514 * @irq: interrupt number
1515 * @dev_id: SCSI driver HA context
1517 * Called by system whenever the host adapter generates an interrupt.
1519 * Returns handled flag.
1521 irqreturn_t
1522 qla24xx_intr_handler(int irq, void *dev_id)
1524 scsi_qla_host_t *ha;
1525 struct device_reg_24xx __iomem *reg;
1526 int status;
1527 unsigned long iter;
1528 uint32_t stat;
1529 uint32_t hccr;
1530 uint16_t mb[4];
1532 ha = (scsi_qla_host_t *) dev_id;
1533 if (!ha) {
1534 printk(KERN_INFO
1535 "%s(): NULL host pointer\n", __func__);
1536 return IRQ_NONE;
1539 reg = &ha->iobase->isp24;
1540 status = 0;
1542 spin_lock(&ha->hardware_lock);
1543 for (iter = 50; iter--; ) {
1544 stat = RD_REG_DWORD(&reg->host_status);
1545 if (stat & HSRX_RISC_PAUSED) {
1546 if (pci_channel_offline(ha->pdev))
1547 break;
1549 if (ha->hw_event_pause_errors == 0)
1550 qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR,
1551 0, MSW(stat), LSW(stat));
1552 else if (ha->hw_event_pause_errors < 0xffffffff)
1553 ha->hw_event_pause_errors++;
1555 hccr = RD_REG_DWORD(&reg->hccr);
1557 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1558 "Dumping firmware!\n", hccr);
1560 qla2xxx_check_risc_status(ha);
1562 ha->isp_ops->fw_dump(ha, 1);
1563 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1564 break;
1565 } else if ((stat & HSRX_RISC_INT) == 0)
1566 break;
1568 switch (stat & 0xff) {
1569 case 0x1:
1570 case 0x2:
1571 case 0x10:
1572 case 0x11:
1573 qla24xx_mbx_completion(ha, MSW(stat));
1574 status |= MBX_INTERRUPT;
1576 break;
1577 case 0x12:
1578 mb[0] = MSW(stat);
1579 mb[1] = RD_REG_WORD(&reg->mailbox1);
1580 mb[2] = RD_REG_WORD(&reg->mailbox2);
1581 mb[3] = RD_REG_WORD(&reg->mailbox3);
1582 qla2x00_async_event(ha, mb);
1583 break;
1584 case 0x13:
1585 qla24xx_process_response_queue(ha);
1586 break;
1587 default:
1588 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1589 "(%d).\n",
1590 ha->host_no, stat & 0xff));
1591 break;
1593 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1594 RD_REG_DWORD_RELAXED(&reg->hccr);
1596 spin_unlock(&ha->hardware_lock);
1598 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1599 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1600 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1601 complete(&ha->mbx_intr_comp);
1604 return IRQ_HANDLED;
1607 static irqreturn_t
1608 qla24xx_msix_rsp_q(int irq, void *dev_id)
1610 scsi_qla_host_t *ha;
1611 struct device_reg_24xx __iomem *reg;
1613 ha = dev_id;
1614 reg = &ha->iobase->isp24;
1616 spin_lock_irq(&ha->hardware_lock);
1618 qla24xx_process_response_queue(ha);
1619 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1621 spin_unlock_irq(&ha->hardware_lock);
1623 return IRQ_HANDLED;
1626 static irqreturn_t
1627 qla24xx_msix_default(int irq, void *dev_id)
1629 scsi_qla_host_t *ha;
1630 struct device_reg_24xx __iomem *reg;
1631 int status;
1632 uint32_t stat;
1633 uint32_t hccr;
1634 uint16_t mb[4];
1636 ha = dev_id;
1637 reg = &ha->iobase->isp24;
1638 status = 0;
1640 spin_lock_irq(&ha->hardware_lock);
1641 do {
1642 stat = RD_REG_DWORD(&reg->host_status);
1643 if (stat & HSRX_RISC_PAUSED) {
1644 if (pci_channel_offline(ha->pdev))
1645 break;
1647 if (ha->hw_event_pause_errors == 0)
1648 qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR,
1649 0, MSW(stat), LSW(stat));
1650 else if (ha->hw_event_pause_errors < 0xffffffff)
1651 ha->hw_event_pause_errors++;
1653 hccr = RD_REG_DWORD(&reg->hccr);
1655 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1656 "Dumping firmware!\n", hccr);
1658 qla2xxx_check_risc_status(ha);
1660 ha->isp_ops->fw_dump(ha, 1);
1661 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1662 break;
1663 } else if ((stat & HSRX_RISC_INT) == 0)
1664 break;
1666 switch (stat & 0xff) {
1667 case 0x1:
1668 case 0x2:
1669 case 0x10:
1670 case 0x11:
1671 qla24xx_mbx_completion(ha, MSW(stat));
1672 status |= MBX_INTERRUPT;
1674 break;
1675 case 0x12:
1676 mb[0] = MSW(stat);
1677 mb[1] = RD_REG_WORD(&reg->mailbox1);
1678 mb[2] = RD_REG_WORD(&reg->mailbox2);
1679 mb[3] = RD_REG_WORD(&reg->mailbox3);
1680 qla2x00_async_event(ha, mb);
1681 break;
1682 case 0x13:
1683 qla24xx_process_response_queue(ha);
1684 break;
1685 default:
1686 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1687 "(%d).\n",
1688 ha->host_no, stat & 0xff));
1689 break;
1691 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1692 } while (0);
1693 spin_unlock_irq(&ha->hardware_lock);
1695 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1696 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1697 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1698 complete(&ha->mbx_intr_comp);
1701 return IRQ_HANDLED;
1704 /* Interrupt handling helpers. */
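/*
 * Two MSI-X vectors are used: a default vector that handles mailbox
 * completions and asynchronous events, and a dedicated vector for
 * response queue processing.
 */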
1706 struct qla_init_msix_entry {
1707 uint16_t entry;
1708 uint16_t index;
1709 const char *name;
1710 irq_handler_t handler;
1713 static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
1714 { QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
1715 "qla2xxx (default)", qla24xx_msix_default },
1717 { QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
1718 "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
1721 static void
1722 qla24xx_disable_msix(scsi_qla_host_t *ha)
1724 int i;
1725 struct qla_msix_entry *qentry;
1727 for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
1728 qentry = &ha->msix_entries[imsix_entries[i].index];
1729 if (qentry->have_irq)
1730 free_irq(qentry->msix_vector, ha);
1732 pci_disable_msix(ha->pdev);
1735 static int
1736 qla24xx_enable_msix(scsi_qla_host_t *ha)
1738 int i, ret;
1739 struct msix_entry entries[QLA_MSIX_ENTRIES];
1740 struct qla_msix_entry *qentry;
1742 for (i = 0; i < QLA_MSIX_ENTRIES; i++)
1743 entries[i].entry = imsix_entries[i].entry;
1745 ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries));
1746 if (ret) {
1747 qla_printk(KERN_WARNING, ha,
1748 "MSI-X: Failed to enable support -- %d/%d\n",
1749 QLA_MSIX_ENTRIES, ret);
1750 goto msix_out;
1752 ha->flags.msix_enabled = 1;
1754 for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
1755 qentry = &ha->msix_entries[imsix_entries[i].index];
1756 qentry->msix_vector = entries[i].vector;
1757 qentry->msix_entry = entries[i].entry;
1758 qentry->have_irq = 0;
1759 ret = request_irq(qentry->msix_vector,
1760 imsix_entries[i].handler, 0, imsix_entries[i].name, ha);
1761 if (ret) {
1762 qla_printk(KERN_WARNING, ha,
1763 "MSI-X: Unable to register handler -- %x/%d.\n",
1764 imsix_entries[i].index, ret);
1765 qla24xx_disable_msix(ha);
1766 goto msix_out;
1768 qentry->have_irq = 1;
1771 msix_out:
1772 return ret;
1776 qla2x00_request_irqs(scsi_qla_host_t *ha)
1778 int ret;
1779 device_reg_t __iomem *reg = ha->iobase;
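	/*
	 * Interrupt setup falls back in order: MSI-X where supported,
	 * then MSI, then legacy INTx.  Whichever mode is enabled, stale
	 * RISC interrupt state is cleared before returning.
	 */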
1781 /* If possible, enable MSI-X. */
1782 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
1783 goto skip_msix;
1785 if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
1786 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
1787 DEBUG2(qla_printk(KERN_WARNING, ha,
1788 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1789 ha->chip_revision, ha->fw_attributes));
1791 goto skip_msix;
1794 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
1795 (ha->pdev->subsystem_device == 0x7040 ||
1796 ha->pdev->subsystem_device == 0x7041 ||
1797 ha->pdev->subsystem_device == 0x1705)) {
1798 DEBUG2(qla_printk(KERN_WARNING, ha,
1799 "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
1800 ha->pdev->subsystem_vendor,
1801 ha->pdev->subsystem_device));
1803 goto skip_msi;
1806 ret = qla24xx_enable_msix(ha);
1807 if (!ret) {
1808 DEBUG2(qla_printk(KERN_INFO, ha,
1809 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
1810 ha->fw_attributes));
1811 goto clear_risc_ints;
1813 qla_printk(KERN_WARNING, ha,
1814 "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
1815 skip_msix:
1817 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
1818 goto skip_msi;
1820 ret = pci_enable_msi(ha->pdev);
1821 if (!ret) {
1822 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
1823 ha->flags.msi_enabled = 1;
1825 skip_msi:
1827 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1828 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
1829 if (ret) {
1830 qla_printk(KERN_WARNING, ha,
1831 "Failed to reserve interrupt %d already in use.\n",
1832 ha->pdev->irq);
1833 goto fail;
1835 ha->flags.inta_enabled = 1;
1836 ha->host->irq = ha->pdev->irq;
1837 clear_risc_ints:
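	/*
	 * Disable interrupts at the ISP and ack any stale interrupt
	 * state so the first real interrupt is delivered cleanly.
	 */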
1839 ha->isp_ops->disable_intrs(ha);
1840 spin_lock_irq(&ha->hardware_lock);
1841 if (IS_FWI2_CAPABLE(ha)) {
1842 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
1843 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
1844 } else {
1845 WRT_REG_WORD(&reg->isp.semaphore, 0);
1846 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
1847 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
1849 spin_unlock_irq(&ha->hardware_lock);
1851 fail:
1852 return ret;
1855 void
1856 qla2x00_free_irqs(scsi_qla_host_t *ha)
1859 if (ha->flags.msix_enabled)
1860 qla24xx_disable_msix(ha);
1861 else if (ha->flags.inta_enabled) {
1862 free_irq(ha->host->irq, ha);
1863 pci_disable_msi(ha->pdev);