[SCSI] qla2xxx: Add ISP25XX support.
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2005 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <scsi/scsi_tcq.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, void *);
static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
static void qla2x00_ms_entry(scsi_qla_host_t *, ms_iocb_entry_t *);

static void qla24xx_ms_entry(scsi_qla_host_t *, struct ct_entry_24xx *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint16_t mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(ha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(ha, mb);
			} else {
				/*EMPTY*/
				DEBUG2(printk("scsi(%ld): Unrecognized "
				    "interrupt type (%d).\n",
				    ha->host_no, mb[0]));
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(ha);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
	}

	return (IRQ_HANDLED);
}

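/*
 * Both legacy INTx handlers follow the same pattern: poll the interrupt
 * status for at most 50 iterations while holding hardware_lock, dispatch
 * mailbox completions, asynchronous events or response-queue work, and
 * finally wake any mailbox-command caller sleeping on mbx_intr_sem once an
 * MBX_INTERRUPT has been observed.
 */
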
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint32_t stat;
	uint16_t hccr;
	uint16_t mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				qla_printk(KERN_INFO, ha, "Parity error -- "
				    "HCCR=%x, Dumping firmware!\n", hccr);
			else
				qla_printk(KERN_INFO, ha, "RISC paused -- "
				    "HCCR=%x, Dumping firmware!\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(ha);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(ha, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(ha, mb);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
	}

	return (IRQ_HANDLED);
}

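/*
 * Mailbox registers 4 and 5 are read through qla2x00_debounce_register()
 * below, presumably because their contents can still be in flux when the
 * completion interrupt is taken; the remaining registers are read directly.
 * On ISP2200 parts the upper mailbox registers live at a different offset,
 * hence the re-pointing of wptr at register 8.
 */
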
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
{
	uint16_t cnt;
	uint16_t __iomem *wptr;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if (cnt == 4 || cnt == 5)
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
	}

	if (ha->mcp) {
		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
		    __func__, ha->host_no, ha->mcp->mb[0]));
	} else {
		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
		    __func__, ha->host_no));
	}
}

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
{
#define LS_UNKNOWN	2
	static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
	char *link_speed;
	uint16_t handle_cnt;
	uint16_t cnt;
	uint32_t handles[5];
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t rscn_entry, host_pid;
	uint8_t rscn_queue_index;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}

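	/*
	 * The switch above only normalizes the various fast-post completion
	 * formats (RIO 16-bit/32-bit handle batches) into handles[] and
	 * rewrites mb[0] to MBA_SCSI_COMPLETION, so the event switch below
	 * needs just one completion path.
	 */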
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!ha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(ha, handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no));

		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mb[1] = RD_MAILBOX_REG(ha, reg, 1);
		mb[2] = RD_MAILBOX_REG(ha, reg, 2);
		mb[3] = RD_MAILBOX_REG(ha, reg, 3);

		qla_printk(KERN_INFO, ha,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
		    mb[1], mb[2], mb[3]);

		ha->isp_ops->fw_dump(ha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				qla_printk(KERN_ERR, ha,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				ha->flags.online = 0;
			} else
				set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		} else if (mb[1] == 0) {
			qla_printk(KERN_INFO, ha,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			ha->flags.online = 0;
		} else
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
		    ha->host_no));
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
		    mb[1]));
		qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);

		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
			link_speed = link_speeds[0];
			ha->link_data_rate = PORT_SPEED_1GB;
		} else {
			link_speed = link_speeds[LS_UNKNOWN];
			if (mb[1] < 5)
				link_speed = link_speeds[mb[1]];
			ha->link_data_rate = mb[1];
		}

		DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
		    ha->host_no, link_speed));
		qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
		    link_speed);

		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN (%x).\n",
		    ha->host_no, mb[1]));
		qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			ha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		ha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		if (ql2xfdmienable)
			set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
		    ha->host_no, mb[1]));
		qla_printk(KERN_INFO, ha,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);

		ha->operating_mode = LOOP;
		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
		    ha->host_no));

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			if (!atomic_read(&ha->loop_down_timer))
				atomic_set(&ha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
			set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
		}
		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
		    "received.\n",
		    ha->host_no));
		qla_printk(KERN_INFO, ha,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			if (!atomic_read(&ha->loop_down_timer))
				atomic_set(&ha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */
		atomic_set(&ha->loop_down_timer, 0);
		if (atomic_read(&ha->loop_state) != LOOP_DOWN &&
		    atomic_read(&ha->loop_state) != LOOP_DEAD) {
			DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
			    "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1],
			    mb[2], mb[3]));
			break;
		}

		DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): Port database changed %04x %04x %04x.\n",
		    ha->host_no, mb[1], mb[2], mb[3]));

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&ha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(ha, 1);

		ha->flags.rscn_queue_overflow = 1;

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): RSCN database changed -- %04x %04x.\n",
		    ha->host_no, mb[1], mb[2]));

		rscn_entry = (mb[1] << 16) | mb[2];
		host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
		    ha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			DEBUG(printk(KERN_INFO
			    "scsi(%ld): Ignoring RSCN update to local host "
			    "port ID (%06x)\n",
			    ha->host_no, host_pid));
			break;
		}

		rscn_queue_index = ha->rscn_in_ptr + 1;
		if (rscn_queue_index == MAX_RSCN_COUNT)
			rscn_queue_index = 0;
		if (rscn_queue_index != ha->rscn_out_ptr) {
			ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry;
			ha->rscn_in_ptr = rscn_queue_index;
		} else {
			ha->flags.rscn_queue_overflow = 1;
		}

		atomic_set(&ha->loop_state, LOOP_UPDATE);
		atomic_set(&ha->loop_down_timer, 0);
		ha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(RSCN_UPDATE, &ha->dpc_flags);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): [R|Z]IO update completion.\n",
		    ha->host_no));

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(ha);
		else
			qla2x00_process_response_queue(ha);
		break;

	case MBA_DISCARD_RND_FRAME:
		DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
		    "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
		break;

	case MBA_TRACE_NOTIFICATION:
		DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
		    ha->host_no, mb[1], mb[2]));
		break;
	}

	if (!ha->parent && ha->num_vhosts)
		qla2x00_alert_all_vps(ha, mb);
}

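/*
 * Queue-depth management helpers: a SAM TASK SET FULL response drops the
 * per-LUN depth via scsi_track_queue_full() (see the status handling
 * further below), while successful fast-post completions ramp it back up,
 * but only once ql2xqfullrampup seconds have passed since both the last
 * ramp-up and the last queue-full event on the port.
 */
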
static void
qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
{
	fc_port_t *fcport = data;

	if (fcport->ha->max_q_depth <= sdev->queue_depth)
		return;

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
		    sdev->queue_depth + 1);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
		    sdev->queue_depth + 1);

	fcport->last_ramp_up = jiffies;

	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
	    sdev->queue_depth));
}

static void
qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
{
	fc_port_t *fcport = data;

	if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
		return;

	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
	    sdev->queue_depth));
}

static inline void
qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
{
	fc_port_t *fcport;
	struct scsi_device *sdev;

	sdev = sp->cmd->device;
	if (sdev->queue_depth >= ha->max_q_depth)
		return;

	fcport = sp->fcport;
	if (time_before(jiffies,
	    fcport->last_ramp_up + ql2xqfullrampup * HZ))
		return;
	if (time_before(jiffies,
	    fcport->last_queue_full + ql2xqfullrampup * HZ))
		return;

	starget_for_each_device(sdev->sdev_target, fcport,
	    qla2x00_adjust_sdev_qdepth_up);
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
{
	srb_t *sp;

	/* Validate handle. */
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
		    ha->host_no, index));
		qla_printk(KERN_WARNING, ha,
		    "Invalid SCSI completion handle %d.\n", index);

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	sp = ha->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		ha->outstanding_cmds[index] = NULL;

		CMD_COMPL_STATUS(sp->cmd) = 0L;
		CMD_SCSI_STATUS(sp->cmd) = 0L;

		/* Save ISP completion status */
		sp->cmd->result = DID_OK << 16;

		qla2x00_ramp_up_queue_depth(ha, sp);
		qla2x00_sp_compl(ha, sp);
	} else {
		DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Invalid ISP SCSI completion handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
	}
}

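/*
 * Response-queue processing: every consumed entry is stamped with
 * RESPONSE_PROCESSED (followed by a write barrier) so a wrapped ring is
 * never re-parsed, and the firmware's out-pointer register is updated once
 * after the ring has been drained rather than per entry.
 */
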
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct scsi_qla_host *ha)
{
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t *pkt;
	uint16_t handle_cnt;
	uint16_t cnt;

	if (!ha->flags.online)
		return;

	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)ha->response_ring_ptr;

		ha->rsp_ring_index++;
		if (ha->rsp_ring_index == ha->response_q_length) {
			ha->rsp_ring_index = 0;
			ha->response_ring_ptr = ha->response_ring;
		} else {
			ha->response_ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			DEBUG3(printk(KERN_INFO
			    "scsi(%ld): Process error entry.\n", ha->host_no));

			qla2x00_error_entry(ha, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(ha, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(ha,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(ha,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
			break;
		case MS_IOCB_TYPE:
			qla2x00_ms_entry(ha, (ms_iocb_entry_t *)pkt);
			break;
		default:
			/* Type Not Supported. */
			DEBUG4(printk(KERN_WARNING
			    "scsi(%ld): Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    ha->host_no, pkt->entry_type, pkt->entry_status));
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index);
}

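/*
 * Status IOCBs are laid out differently on FWI2-capable (ISP24xx/25xx)
 * adapters: completion status, residuals and the combined response/sense
 * buffer come from struct sts_entry_24xx, whereas older ISPs use the
 * legacy sts_entry_t fields.  qla2x00_status_entry() below handles both.
 */
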
/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint8_t lscsi_status;
	int32_t resid;
	uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
	uint8_t *rsp_info, *sense_data;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(ha, sts->handle);

		return;
	}

	/* Validate handle. */
	if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
		sp = ha->outstanding_cmds[sts->handle];
		ha->outstanding_cmds[sts->handle] = NULL;
	} else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		qla2xxx_wake_dpc(ha);
		return;
	}
	cp = sp->cmd;
	if (cp == NULL) {
		DEBUG2(printk("scsi(%ld): Command already returned back to OS "
		    "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp));
		qla_printk(KERN_WARNING, ha,
		    "Command is NULL: already returned to OS (sp=%p)\n", sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;
	CMD_ENTRY_STATUS(cp) = sts->entry_status;
	CMD_COMPL_STATUS(cp) = comp_status;
	CMD_SCSI_STATUS(cp) = scsi_status;

	fcport = sp->fcport;

	sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		sense_len = le32_to_cpu(sts24->sense_len);
		rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		resid_len = le32_to_cpu(sts24->rsp_residual_count);
		fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
	} else {
		sense_len = le16_to_cpu(sts->req_sense_length);
		rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha))
			sense_data += rsp_info_len;
		if (rsp_info_len > 3 && rsp_info[3]) {
			DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
			    "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
			    "retrying command\n", ha->host_no,
			    cp->device->channel, cp->device->id,
			    cp->device->lun, rsp_info_len, rsp_info[0],
			    rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
			    rsp_info[5], rsp_info[6], rsp_info[7]));

			cp->result = DID_BUS_BUSY << 16;
			qla2x00_sp_compl(ha, sp);
			return;
		}
	}

	/*
	 * Based on Host and SCSI status, generate a status code for Linux.
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			cp->result = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);
			CMD_RESID_LEN(cp) = resid;

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				qla_printk(KERN_INFO, ha,
				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
				    "detected (%x of %x bytes)...returning "
				    "error status.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    scsi_bufflen(cp));

				cp->result = DID_ERROR << 16;
				break;
			}
		}
		cp->result = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld): QUEUE FULL status detected "
			    "0x%x-0x%x.\n", ha->host_no, comp_status,
			    scsi_status));

			/* Adjust queue depth for all luns on the port. */
			fcport->last_queue_full = jiffies;
			starget_for_each_device(cp->device->sdev_target,
			    fcport, qla2x00_adjust_sdev_qdepth_down);
			break;
		}
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		memset(cp->sense_buffer, 0, sizeof(cp->sense_buffer));

		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		if (sense_len >= sizeof(cp->sense_buffer))
			sense_len = sizeof(cp->sense_buffer);

		CMD_ACTUAL_SNSLEN(cp) = sense_len;
		sp->request_sense_length = sense_len;
		sp->request_sense_ptr = cp->sense_buffer;

		if (sp->request_sense_length > 32)
			sense_len = 32;

		memcpy(cp->sense_buffer, sense_data, sense_len);

		sp->request_sense_ptr += sense_len;
		sp->request_sense_length -= sense_len;
		if (sp->request_sense_length != 0)
			ha->status_srb = sp;

		DEBUG5(printk("%s(): Check condition Sense data, "
		    "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n", __func__,
		    ha->host_no, cp->device->channel, cp->device->id,
		    cp->device->lun, cp, cp->serial_number));
		if (sense_len)
			DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
			    CMD_ACTUAL_SNSLEN(cp)));
		break;

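	/*
	 * Underrun handling below prefers the firmware-computed residual on
	 * FWI2-capable adapters, and treats an underrun that the target did
	 * not also report (no SS_RESIDUAL_UNDER) as a dropped frame, which
	 * is retried by returning DID_BUS_BUSY.
	 */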
	case CS_DATA_UNDERRUN:
		resid = resid_len;
		/* Use F/W calculated residual length. */
		if (IS_FWI2_CAPABLE(ha))
			resid = fw_resid_len;

		if (scsi_status & SS_RESIDUAL_UNDER) {
			scsi_set_resid(cp, resid);
			CMD_RESID_LEN(cp) = resid;
		} else {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld:%d:%d) UNDERRUN status detected "
			    "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
			    "os_underflow=0x%x\n", ha->host_no,
			    cp->device->id, cp->device->lun, comp_status,
			    scsi_status, resid_len, resid, cp->cmnd[0],
			    cp->underflow));
		}

		/*
		 * Check to see if SCSI Status is non zero.  If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			cp->result = DID_OK << 16 | lscsi_status;

			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				DEBUG2(printk(KERN_INFO
				    "scsi(%ld): QUEUE FULL status detected "
				    "0x%x-0x%x.\n", ha->host_no, comp_status,
				    scsi_status));

				/*
				 * Adjust queue depth for all luns on the
				 * port.
				 */
				fcport->last_queue_full = jiffies;
				starget_for_each_device(
				    cp->device->sdev_target, fcport,
				    qla2x00_adjust_sdev_qdepth_down);
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			/* Copy Sense Data into sense buffer */
			memset(cp->sense_buffer, 0, sizeof(cp->sense_buffer));

			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			if (sense_len >= sizeof(cp->sense_buffer))
				sense_len = sizeof(cp->sense_buffer);

			CMD_ACTUAL_SNSLEN(cp) = sense_len;
			sp->request_sense_length = sense_len;
			sp->request_sense_ptr = cp->sense_buffer;

			if (sp->request_sense_length > 32)
				sense_len = 32;

			memcpy(cp->sense_buffer, sense_data, sense_len);

			sp->request_sense_ptr += sense_len;
			sp->request_sense_length -= sense_len;
			if (sp->request_sense_length != 0)
				ha->status_srb = sp;

			DEBUG5(printk("%s(): Check condition Sense data, "
			    "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n",
			    __func__, ha->host_no, cp->device->channel,
			    cp->device->id, cp->device->lun, cp,
			    cp->serial_number));

			/*
			 * In case of an Underrun condition, set both the lscsi
			 * status and the completion status to appropriate
			 * values.
			 */
			if (resid &&
			    ((unsigned)(cp->request_bufflen - resid) <
			     cp->underflow)) {
				DEBUG2(qla_printk(KERN_INFO, ha,
				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
				    "detected (%x of %x bytes)...returning "
				    "error status.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    cp->request_bufflen));

				cp->result = DID_ERROR << 16 | lscsi_status;
			}

			if (sense_len)
				DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
				    CMD_ACTUAL_SNSLEN(cp)));
		} else {
			/*
			 * If RISC reports underrun and target does not report
			 * it then we must have a lost frame, so tell upper
			 * layer to retry it by reporting a bus busy.
			 */
			if (!(scsi_status & SS_RESIDUAL_UNDER)) {
				DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
				    "frame(s) detected (%x of %x bytes)..."
				    "retrying command.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    scsi_bufflen(cp)));

				cp->result = DID_BUS_BUSY << 16;
				break;
			}

			/* Handle mid-layer underflow */
			if ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow) {
				qla_printk(KERN_INFO, ha,
				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
				    "detected (%x of %x bytes)...returning "
				    "error status.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    scsi_bufflen(cp));

				cp->result = DID_ERROR << 16;
				break;
			}

			/* Everybody online, looking good... */
			cp->result = DID_OK << 16;
		}
		break;

	case CS_DATA_OVERRUN:
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
		    ha->host_no, cp->device->id, cp->device->lun, comp_status,
		    scsi_status));
		DEBUG2(printk(KERN_INFO
		    "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
		    cp->cmnd[4], cp->cmnd[5]));
		DEBUG2(printk(KERN_INFO
		    "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
		    "status!\n",
		    cp->serial_number, scsi_bufflen(cp), resid_len));

		cp->result = DID_ERROR << 16;
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
		/*
		 * If the port is in Target Down state, return all IOs for this
		 * Target with DID_NO_CONNECT; otherwise queue the IOs in the
		 * retry_queue.
		 */
		DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
		    "pid=%ld, compl status=0x%x, port state=0x%x\n",
		    ha->host_no, cp->device->id, cp->device->lun,
		    cp->serial_number, comp_status,
		    atomic_read(&fcport->state)));

		cp->result = DID_BUS_BUSY << 16;
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			qla2x00_mark_device_lost(ha, fcport, 1, 1);
		}
		break;

	case CS_RESET:
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
		    ha->host_no, comp_status, scsi_status));

		cp->result = DID_RESET << 16;
		break;

	case CS_ABORTED:
		/*
		 * hv2.19.12 - DID_ABORT does not retry the request.  If we
		 * aborted this request then abort; otherwise it must be a
		 * reset.
		 */
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
		    ha->host_no, comp_status, scsi_status));

		cp->result = DID_RESET << 16;
		break;

	case CS_TIMEOUT:
		cp->result = DID_BUS_BUSY << 16;

		if (IS_FWI2_CAPABLE(ha)) {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
			    "0x%x-0x%x\n", ha->host_no, cp->device->channel,
			    cp->device->id, cp->device->lun, comp_status,
			    scsi_status));
			break;
		}
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
		    "sflags=%x.\n", ha->host_no, cp->device->channel,
		    cp->device->id, cp->device->lun, comp_status, scsi_status,
		    le16_to_cpu(sts->status_flags)));

		/* Check to see if logout occurred. */
		if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
			qla2x00_mark_device_lost(ha, fcport, 1, 1);
		break;

	default:
		DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
		    "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status));
		qla_printk(KERN_INFO, ha,
		    "Unknown status detected 0x%x-0x%x.\n",
		    comp_status, scsi_status);

		cp->result = DID_ERROR << 16;
		break;
	}

	/* Place command on done queue. */
	if (ha->status_srb == NULL)
		qla2x00_sp_compl(ha, sp);
}

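/*
 * When a check condition carries more sense data than fits in the status
 * IOCB, the remainder arrives in Status Continuation entries; ha->status_srb
 * keeps the in-progress srb and final completion is deferred until
 * request_sense_length reaches zero in qla2x00_status_cont_entry().
 */
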
/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
{
	uint8_t sense_sz = 0;
	srb_t *sp = ha->status_srb;
	struct scsi_cmnd *cp;

	if (sp != NULL && sp->request_sense_length != 0) {
		cp = sp->cmd;
		if (cp == NULL) {
			DEBUG2(printk("%s(): Cmd already returned back to OS "
			    "sp=%p.\n", __func__, sp));
			qla_printk(KERN_INFO, ha,
			    "cmd is NULL: already returned to OS (sp=%p)\n",
			    sp);

			ha->status_srb = NULL;
			return;
		}

		if (sp->request_sense_length > sizeof(pkt->data)) {
			sense_sz = sizeof(pkt->data);
		} else {
			sense_sz = sp->request_sense_length;
		}

		/* Move sense data. */
		if (IS_FWI2_CAPABLE(ha))
			host_to_fcp_swap(pkt->data, sizeof(pkt->data));
		memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
		DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));

		sp->request_sense_ptr += sense_sz;
		sp->request_sense_length -= sense_sz;

		/* Place command on done queue. */
		if (sp->request_sense_length == 0) {
			ha->status_srb = NULL;
			qla2x00_sp_compl(ha, sp);
		}
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
{
	srb_t *sp;

#if defined(QL_DEBUG_LEVEL_2)
	if (pkt->entry_status & RF_INV_E_ORDER)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
	else if (pkt->entry_status & RF_INV_E_COUNT)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
	else if (pkt->entry_status & RF_INV_E_PARAM)
		qla_printk(KERN_ERR, ha,
		    "%s: Invalid Entry Parameter\n", __func__);
	else if (pkt->entry_status & RF_INV_E_TYPE)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
	else if (pkt->entry_status & RF_BUSY)
		qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
	else
		qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
#endif

	/* Validate handle. */
	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle];
	else
		sp = NULL;

	if (sp) {
		/* Free outstanding command slot. */
		ha->outstanding_cmds[pkt->handle] = NULL;

		/* Bad payload or header */
		if (pkt->entry_status &
		    (RF_INV_E_ORDER | RF_INV_E_COUNT |
		     RF_INV_E_PARAM | RF_INV_E_TYPE)) {
			sp->cmd->result = DID_ERROR << 16;
		} else if (pkt->entry_status & RF_BUSY) {
			sp->cmd->result = DID_BUS_BUSY << 16;
		} else {
			sp->cmd->result = DID_ERROR << 16;
		}
		qla2x00_sp_compl(ha, sp);

	} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
	    COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
		DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Error entry - invalid handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		qla2xxx_wake_dpc(ha);
	}
}

/**
 * qla2x00_ms_entry() - Process a Management Server entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_ms_entry(scsi_qla_host_t *ha, ms_iocb_entry_t *pkt)
{
	srb_t *sp;

	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
	    __func__, ha->host_no, pkt, pkt->handle1));

	/* Validate handle. */
	if (pkt->handle1 < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle1];
	else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->status);
	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;

	/* Free outstanding command slot. */
	ha->outstanding_cmds[pkt->handle1] = NULL;

	qla2x00_sp_compl(ha, sp);
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
{
	uint16_t cnt;
	uint16_t __iomem *wptr;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
		wptr++;
	}

	if (ha->mcp) {
		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
		    __func__, ha->host_no, ha->mcp->mb[0]));
	} else {
		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
		    __func__, ha->host_no));
	}
}

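/*
 * The FWI2 response ring below uses the same RESPONSE_PROCESSED signature
 * scheme as the legacy path, but writes the 32-bit rsp_q_out register in
 * the ISP24xx register block and additionally handles the Report ID
 * Acquisition IOCB used for NPIV virtual ports.
 */
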
/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla24xx_process_response_queue(struct scsi_qla_host *ha)
{
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	struct sts_entry_24xx *pkt;

	if (!ha->flags.online)
		return;

	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)ha->response_ring_ptr;

		ha->rsp_ring_index++;
		if (ha->rsp_ring_index == ha->response_q_length) {
			ha->rsp_ring_index = 0;
			ha->response_ring_ptr = ha->response_ring;
		} else {
			ha->response_ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			DEBUG3(printk(KERN_INFO
			    "scsi(%ld): Process error entry.\n", ha->host_no));

			qla2x00_error_entry(ha, (sts_entry_t *) pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(ha, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
			break;
		case MS_IOCB_TYPE:
			qla24xx_ms_entry(ha, (struct ct_entry_24xx *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(ha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		default:
			/* Type Not Supported. */
			DEBUG4(printk(KERN_WARNING
			    "scsi(%ld): Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    ha->host_no, pkt->entry_type, pkt->entry_status));
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(&reg->rsp_q_out, ha->rsp_ring_index);
}

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx and ISP25xx.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_24xx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return IRQ_NONE;
	}

	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			hccr = RD_REG_DWORD(&reg->hccr);

			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
			    "Dumping firmware!\n", hccr);
			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla24xx_process_response_queue(ha);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
	}

	return IRQ_HANDLED;
}

/**
 * qla24xx_ms_entry() - Process a Management Server entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla24xx_ms_entry(scsi_qla_host_t *ha, struct ct_entry_24xx *pkt)
{
	srb_t *sp;

	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
	    __func__, ha->host_no, pkt, pkt->handle));

	DEBUG9(printk("%s: ct pkt dump:\n", __func__));
	DEBUG9(qla2x00_dump_buffer((void *)pkt, sizeof(struct ct_entry_24xx)));

	/* Validate handle. */
	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle];
	else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		DEBUG10(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle %d\n",
		    pkt->handle);

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->comp_status);
	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;

	/* Free outstanding command slot. */
	ha->outstanding_cmds[pkt->handle] = NULL;

	qla2x00_sp_compl(ha, sp);
}

static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	ha = dev_id;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	qla24xx_process_response_queue(ha);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	RD_REG_DWORD_RELAXED(&reg->hccr);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_24xx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[4];

	ha = dev_id;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			hccr = RD_REG_DWORD(&reg->hccr);

			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
			    "Dumping firmware!\n", hccr);
			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla24xx_process_response_queue(ha);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
	}

	return IRQ_HANDLED;
}

/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	uint16_t entry;
	uint16_t index;
	const char *name;
	irq_handler_t handler;
};

static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
	{ QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
		"qla2xxx (default)", qla24xx_msix_default },

	{ QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
		"qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
};

static void
qla24xx_disable_msix(scsi_qla_host_t *ha)
{
	int i;
	struct qla_msix_entry *qentry;

	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
		qentry = &ha->msix_entries[imsix_entries[i].index];
		if (qentry->have_irq)
			free_irq(qentry->msix_vector, ha);
	}
	pci_disable_msix(ha->pdev);
}

static int
qla24xx_enable_msix(scsi_qla_host_t *ha)
{
	int i, ret;
	struct msix_entry entries[QLA_MSIX_ENTRIES];
	struct qla_msix_entry *qentry;

	for (i = 0; i < QLA_MSIX_ENTRIES; i++)
		entries[i].entry = imsix_entries[i].entry;

	ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries));
	if (ret) {
		qla_printk(KERN_WARNING, ha,
		    "MSI-X: Failed to enable support -- %d/%d\n",
		    QLA_MSIX_ENTRIES, ret);
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
		qentry = &ha->msix_entries[imsix_entries[i].index];
		qentry->msix_vector = entries[i].vector;
		qentry->msix_entry = entries[i].entry;
		qentry->have_irq = 0;
		ret = request_irq(qentry->msix_vector,
		    imsix_entries[i].handler, 0, imsix_entries[i].name, ha);
		if (ret) {
			qla_printk(KERN_WARNING, ha,
			    "MSI-X: Unable to register handler -- %x/%d.\n",
			    imsix_entries[i].index, ret);
			qla24xx_disable_msix(ha);
			goto msix_out;
		}
		qentry->have_irq = 1;
	}

msix_out:
	return ret;
}

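/*
 * Interrupt setup falls back in stages: MSI-X is attempted first on
 * ISP2432/ISP2532 parts with suitable firmware, then plain MSI, and
 * finally a shared INTx line registered against the isp_ops interrupt
 * handler.
 */
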
int
qla2x00_request_irqs(scsi_qla_host_t *ha)
{
	int ret;

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha))
		goto skip_msix;

	if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
	    !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes));

		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha);
	if (!ret) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
		    ha->fw_attributes));
		return ret;
	}
	qla_printk(KERN_WARNING, ha,
	    "MSI-X: Falling back to INTa mode -- %d.\n", ret);
skip_msix:

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
		ha->flags.msi_enabled = 1;
	}
skip_msi:

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
	if (!ret) {
		ha->flags.inta_enabled = 1;
		ha->host->irq = ha->pdev->irq;
	} else {
		qla_printk(KERN_WARNING, ha,
		    "Failed to reserve interrupt %d already in use.\n",
		    ha->pdev->irq);
	}

	return ret;
}

void
qla2x00_free_irqs(scsi_qla_host_t *ha)
{

	if (ha->flags.msix_enabled)
		qla24xx_disable_msix(ha);
	else if (ha->flags.inta_enabled) {
		free_irq(ha->host->irq, ha);
		pci_disable_msi(ha->pdev);
	}
}