/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2005 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <scsi/scsi_tcq.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, void *);
static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
static void qla2x00_ms_entry(scsi_qla_host_t *, ms_iocb_entry_t *);

static void qla24xx_ms_entry(scsi_qla_host_t *, struct ct_entry_24xx *);
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint16_t mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(ha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(ha, mb);
			} else {
				/*EMPTY*/
				DEBUG2(printk("scsi(%ld): Unrecognized "
				    "interrupt type (%d).\n",
				    ha->host_no, mb[0]));
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(ha);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
	}

	return (IRQ_HANDLED);
}
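
/*
 * Note: the interrupt handlers in this file share a common shape: poll the
 * interrupt status for up to 50 iterations while holding the hardware lock,
 * dispatch mailbox completions, asynchronous events and response queue
 * entries, acknowledge the RISC interrupt via HCCR, and finally wake any
 * waiter sleeping on ha->mbx_intr_sem once a mailbox command has completed.
 */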
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint32_t stat;
	uint16_t hccr;
	uint16_t mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				qla_printk(KERN_INFO, ha, "Parity error -- "
				    "HCCR=%x, Dumping firmware!\n", hccr);
			else
				qla_printk(KERN_INFO, ha, "RISC paused -- "
				    "HCCR=%x, Dumping firmware!\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(ha);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(ha, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(ha, mb);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
	}

	return (IRQ_HANDLED);
}
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
{
	uint16_t cnt;
	uint16_t __iomem *wptr;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if (cnt == 4 || cnt == 5)
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
	}

	if (ha->mcp) {
		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
		    __func__, ha->host_no, ha->mcp->mb[0]));
	} else {
		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
		    __func__, ha->host_no));
	}
}
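
/*
 * Reduced Interrupt Operation (RIO) completions arrive as the
 * MBA_CMPLT_x_16BIT/_32BIT events below, carrying one to five I/O handles
 * packed into the mailbox registers.  qla2x00_async_event() first unpacks
 * those handles and rewrites mb[0] to MBA_SCSI_COMPLETION so that all of
 * them funnel through qla2x00_process_completed_request().
 */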
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
{
#define LS_UNKNOWN	2
	static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
	char *link_speed;
	uint16_t handle_cnt;
	uint16_t cnt;
	uint32_t handles[5];
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t rscn_entry, host_pid;
	uint8_t rscn_queue_index;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}

	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!ha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(ha, handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no));

		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mb[1] = RD_MAILBOX_REG(ha, reg, 1);
		mb[2] = RD_MAILBOX_REG(ha, reg, 2);
		mb[3] = RD_MAILBOX_REG(ha, reg, 3);

		qla_printk(KERN_INFO, ha,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
		    mb[1], mb[2], mb[3]);

		ha->isp_ops->fw_dump(ha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				qla_printk(KERN_ERR, ha,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				ha->flags.online = 0;
			} else
				set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		} else if (mb[1] == 0) {
			qla_printk(KERN_INFO, ha,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			ha->flags.online = 0;
		} else
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
		    ha->host_no));
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
		    mb[1]));
		qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);

		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
			link_speed = link_speeds[0];
			ha->link_data_rate = PORT_SPEED_1GB;
		} else {
			link_speed = link_speeds[LS_UNKNOWN];
			if (mb[1] < 5)
				link_speed = link_speeds[mb[1]];
			ha->link_data_rate = mb[1];
		}

		DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
		    ha->host_no, link_speed));
		qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
		    link_speed);

		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN (%x).\n",
		    ha->host_no, mb[1]));
		qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			ha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		ha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		if (ql2xfdmienable)
			set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
		    ha->host_no, mb[1]));
		qla_printk(KERN_INFO, ha,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);

		ha->operating_mode = LOOP;
		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
		    ha->host_no));

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			if (!atomic_read(&ha->loop_down_timer))
				atomic_set(&ha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
			set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
		}
		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
		    "received.\n",
		    ha->host_no));
		qla_printk(KERN_INFO, ha,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			if (!atomic_read(&ha->loop_down_timer))
				atomic_set(&ha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */
		atomic_set(&ha->loop_down_timer, 0);
		if (atomic_read(&ha->loop_state) != LOOP_DOWN &&
		    atomic_read(&ha->loop_state) != LOOP_DEAD) {
			DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
			    "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1],
			    mb[2], mb[3]));
			break;
		}

		DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): Port database changed %04x %04x %04x.\n",
		    ha->host_no, mb[1], mb[2], mb[3]));

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&ha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(ha, 1);

		ha->flags.rscn_queue_overflow = 1;

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): RSCN database changed -- %04x %04x.\n",
		    ha->host_no, mb[1], mb[2]));

		rscn_entry = (mb[1] << 16) | mb[2];
		host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
		    ha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			DEBUG(printk(KERN_INFO
			    "scsi(%ld): Ignoring RSCN update to local host "
			    "port ID (%06x)\n",
			    ha->host_no, host_pid));
			break;
		}

		rscn_queue_index = ha->rscn_in_ptr + 1;
		if (rscn_queue_index == MAX_RSCN_COUNT)
			rscn_queue_index = 0;
		if (rscn_queue_index != ha->rscn_out_ptr) {
			ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry;
			ha->rscn_in_ptr = rscn_queue_index;
		} else {
			ha->flags.rscn_queue_overflow = 1;
		}

		atomic_set(&ha->loop_state, LOOP_UPDATE);
		atomic_set(&ha->loop_down_timer, 0);
		ha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(RSCN_UPDATE, &ha->dpc_flags);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): [R|Z]IO update completion.\n",
		    ha->host_no));

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(ha);
		else
			qla2x00_process_response_queue(ha);
		break;

	case MBA_DISCARD_RND_FRAME:
		DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
		    "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
		break;

	case MBA_TRACE_NOTIFICATION:
		DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
		    ha->host_no, mb[1], mb[2]));
		break;
	}

	if (!ha->parent && ha->num_vhosts)
		qla2x00_alert_all_vps(ha, mb);
}
static void
qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
{
	fc_port_t *fcport = data;

	if (fcport->ha->max_q_depth <= sdev->queue_depth)
		return;

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
		    sdev->queue_depth + 1);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
		    sdev->queue_depth + 1);

	fcport->last_ramp_up = jiffies;

	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
	    sdev->queue_depth));
}
static void
qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
{
	fc_port_t *fcport = data;

	if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
		return;

	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
	    sdev->queue_depth));
}
static inline void
qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
{
	fc_port_t *fcport;
	struct scsi_device *sdev;

	sdev = sp->cmd->device;
	if (sdev->queue_depth >= ha->max_q_depth)
		return;

	fcport = sp->fcport;
	if (time_before(jiffies,
	    fcport->last_ramp_up + ql2xqfullrampup * HZ))
		return;
	if (time_before(jiffies,
	    fcport->last_queue_full + ql2xqfullrampup * HZ))
		return;

	starget_for_each_device(sdev->sdev_target, fcport,
	    qla2x00_adjust_sdev_qdepth_up);
}
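
/*
 * Queue-depth handling: qla2x00_adjust_sdev_qdepth_down() is invoked when a
 * command returns SAM_STAT_TASK_SET_FULL, while qla2x00_ramp_up_queue_depth()
 * gradually restores the depth on successful fast-post completions, but only
 * after ql2xqfullrampup seconds have elapsed since both the last ramp-up and
 * the last queue-full condition on the port.
 */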
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
{
	srb_t *sp;

	/* Validate handle. */
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
		    ha->host_no, index));
		qla_printk(KERN_WARNING, ha,
		    "Invalid SCSI completion handle %d.\n", index);

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	sp = ha->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		ha->outstanding_cmds[index] = NULL;

		CMD_COMPL_STATUS(sp->cmd) = 0L;
		CMD_SCSI_STATUS(sp->cmd) = 0L;

		/* Save ISP completion status */
		sp->cmd->result = DID_OK << 16;

		qla2x00_ramp_up_queue_depth(ha, sp);
		qla2x00_sp_compl(ha, sp);
	} else {
		DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Invalid ISP SCSI completion handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
	}
}
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct scsi_qla_host *ha)
{
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t *pkt;
	uint16_t handle_cnt;
	uint16_t cnt;

	if (!ha->flags.online)
		return;

	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)ha->response_ring_ptr;

		ha->rsp_ring_index++;
		if (ha->rsp_ring_index == ha->response_q_length) {
			ha->rsp_ring_index = 0;
			ha->response_ring_ptr = ha->response_ring;
		} else {
			ha->response_ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			DEBUG3(printk(KERN_INFO
			    "scsi(%ld): Process error entry.\n", ha->host_no));

			qla2x00_error_entry(ha, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(ha, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(ha,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(ha,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
			break;
		case MS_IOCB_TYPE:
			qla2x00_ms_entry(ha, (ms_iocb_entry_t *)pkt);
			break;
		default:
			/* Type Not Supported. */
			DEBUG4(printk(KERN_WARNING
			    "scsi(%ld): Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    ha->host_no, pkt->entry_type, pkt->entry_status));
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index);
}
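
/*
 * Each consumed ring entry is stamped with RESPONSE_PROCESSED (followed by a
 * write barrier), which lets the loop above detect fresh firmware entries by
 * signature rather than by re-reading the ring pointers; the response queue
 * out-pointer register is only updated once the ring has been drained.
 */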
/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint8_t lscsi_status;
	int32_t resid;
	uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
	uint8_t *rsp_info, *sense_data;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(ha, sts->handle);

		return;
	}

	/* Validate handle. */
	if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
		sp = ha->outstanding_cmds[sts->handle];
		ha->outstanding_cmds[sts->handle] = NULL;
	} else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		qla2xxx_wake_dpc(ha);
		return;
	}
	cp = sp->cmd;
	if (cp == NULL) {
		DEBUG2(printk("scsi(%ld): Command already returned back to OS "
		    "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp));
		qla_printk(KERN_WARNING, ha,
		    "Command is NULL: already returned to OS (sp=%p)\n", sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;
	CMD_ENTRY_STATUS(cp) = sts->entry_status;
	CMD_COMPL_STATUS(cp) = comp_status;
	CMD_SCSI_STATUS(cp) = scsi_status;

	fcport = sp->fcport;

	sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		sense_len = le32_to_cpu(sts24->sense_len);
		rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		resid_len = le32_to_cpu(sts24->rsp_residual_count);
		fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
	} else {
		sense_len = le16_to_cpu(sts->req_sense_length);
		rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha))
			sense_data += rsp_info_len;
		if (rsp_info_len > 3 && rsp_info[3]) {
			DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
			    "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
			    "retrying command\n", ha->host_no,
			    cp->device->channel, cp->device->id,
			    cp->device->lun, rsp_info_len, rsp_info[0],
			    rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
			    rsp_info[5], rsp_info[6], rsp_info[7]));

			cp->result = DID_BUS_BUSY << 16;
			qla2x00_sp_compl(ha, sp);
			return;
		}
	}

	/*
	 * Based on Host and SCSI status, generate a status code for Linux.
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			cp->result = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);
			CMD_RESID_LEN(cp) = resid;

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				qla_printk(KERN_INFO, ha,
				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
				    "detected (%x of %x bytes)...returning "
				    "error status.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    scsi_bufflen(cp));

				cp->result = DID_ERROR << 16;
				break;
			}
		}
		cp->result = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld): QUEUE FULL status detected "
			    "0x%x-0x%x.\n", ha->host_no, comp_status,
			    scsi_status));

			/* Adjust queue depth for all luns on the port. */
			fcport->last_queue_full = jiffies;
			starget_for_each_device(cp->device->sdev_target,
			    fcport, qla2x00_adjust_sdev_qdepth_down);
			break;
		}
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		memset(cp->sense_buffer, 0, sizeof(cp->sense_buffer));

		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		if (sense_len >= sizeof(cp->sense_buffer))
			sense_len = sizeof(cp->sense_buffer);

		CMD_ACTUAL_SNSLEN(cp) = sense_len;
		sp->request_sense_length = sense_len;
		sp->request_sense_ptr = cp->sense_buffer;

		if (sp->request_sense_length > 32)
			sense_len = 32;

		memcpy(cp->sense_buffer, sense_data, sense_len);

		sp->request_sense_ptr += sense_len;
		sp->request_sense_length -= sense_len;
		if (sp->request_sense_length != 0)
			ha->status_srb = sp;

		DEBUG5(printk("%s(): Check condition Sense data, "
		    "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n", __func__,
		    ha->host_no, cp->device->channel, cp->device->id,
		    cp->device->lun, cp, cp->serial_number));
		if (sense_len)
			DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
			    CMD_ACTUAL_SNSLEN(cp)));
		break;

	case CS_DATA_UNDERRUN:
		resid = resid_len;
		/* Use F/W calculated residual length. */
		if (IS_FWI2_CAPABLE(ha))
			resid = fw_resid_len;

		if (scsi_status & SS_RESIDUAL_UNDER) {
			scsi_set_resid(cp, resid);
			CMD_RESID_LEN(cp) = resid;
		} else {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld:%d:%d) UNDERRUN status detected "
			    "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
			    "os_underflow=0x%x\n", ha->host_no,
			    cp->device->id, cp->device->lun, comp_status,
			    scsi_status, resid_len, resid, cp->cmnd[0],
			    cp->underflow));
		}

		/*
		 * Check to see if SCSI Status is non-zero. If so, report
		 * SCSI Status.
		 */
		if (lscsi_status != 0) {
			cp->result = DID_OK << 16 | lscsi_status;

			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				DEBUG2(printk(KERN_INFO
				    "scsi(%ld): QUEUE FULL status detected "
				    "0x%x-0x%x.\n", ha->host_no, comp_status,
				    scsi_status));

				/*
				 * Adjust queue depth for all luns on the
				 * port.
				 */
				fcport->last_queue_full = jiffies;
				starget_for_each_device(
				    cp->device->sdev_target, fcport,
				    qla2x00_adjust_sdev_qdepth_down);
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			/* Copy Sense Data into sense buffer */
			memset(cp->sense_buffer, 0, sizeof(cp->sense_buffer));

			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			if (sense_len >= sizeof(cp->sense_buffer))
				sense_len = sizeof(cp->sense_buffer);

			CMD_ACTUAL_SNSLEN(cp) = sense_len;
			sp->request_sense_length = sense_len;
			sp->request_sense_ptr = cp->sense_buffer;

			if (sp->request_sense_length > 32)
				sense_len = 32;

			memcpy(cp->sense_buffer, sense_data, sense_len);

			sp->request_sense_ptr += sense_len;
			sp->request_sense_length -= sense_len;
			if (sp->request_sense_length != 0)
				ha->status_srb = sp;

			DEBUG5(printk("%s(): Check condition Sense data, "
			    "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n",
			    __func__, ha->host_no, cp->device->channel,
			    cp->device->id, cp->device->lun, cp,
			    cp->serial_number));

			/*
			 * In case of an Underrun condition, set both the lscsi
			 * status and the completion status to appropriate
			 * values.
			 */
			if (resid &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				DEBUG2(qla_printk(KERN_INFO, ha,
				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
				    "detected (%x of %x bytes)...returning "
				    "error status.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    scsi_bufflen(cp)));

				cp->result = DID_ERROR << 16 | lscsi_status;
			}

			if (sense_len)
				DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
				    CMD_ACTUAL_SNSLEN(cp)));
		} else {
			/*
			 * If RISC reports underrun and target does not report
			 * it then we must have a lost frame, so tell upper
			 * layer to retry it by reporting a bus busy.
			 */
			if (!(scsi_status & SS_RESIDUAL_UNDER)) {
				DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
				    "frame(s) detected (%x of %x bytes)..."
				    "retrying command.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    scsi_bufflen(cp)));

				cp->result = DID_BUS_BUSY << 16;
				break;
			}

			/* Handle mid-layer underflow */
			if ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow) {
				qla_printk(KERN_INFO, ha,
				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
				    "detected (%x of %x bytes)...returning "
				    "error status.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    scsi_bufflen(cp));

				cp->result = DID_ERROR << 16;
				break;
			}

			/* Everybody online, looking good... */
			cp->result = DID_OK << 16;
		}
		break;

	case CS_DATA_OVERRUN:
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
		    ha->host_no, cp->device->id, cp->device->lun, comp_status,
		    scsi_status));
		DEBUG2(printk(KERN_INFO
		    "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
		    cp->cmnd[4], cp->cmnd[5]));
		DEBUG2(printk(KERN_INFO
		    "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
		    "status!\n",
		    cp->serial_number, scsi_bufflen(cp), resid_len));

		cp->result = DID_ERROR << 16;
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
		/*
		 * If the port is in Target Down state, return all IOs for this
		 * Target with DID_NO_CONNECT ELSE Queue the IOs in the
		 * retry_queue.
		 */
		DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
		    "pid=%ld, compl status=0x%x, port state=0x%x\n",
		    ha->host_no, cp->device->id, cp->device->lun,
		    cp->serial_number, comp_status,
		    atomic_read(&fcport->state)));

		cp->result = DID_BUS_BUSY << 16;
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			qla2x00_mark_device_lost(ha, fcport, 1, 1);
		}
		break;

	case CS_RESET:
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
		    ha->host_no, comp_status, scsi_status));

		cp->result = DID_RESET << 16;
		break;

	case CS_ABORTED:
		/*
		 * hv2.19.12 - DID_ABORT does not retry the request if we
		 * aborted this request then abort otherwise it must be a
		 * reset.
		 */
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
		    ha->host_no, comp_status, scsi_status));

		cp->result = DID_RESET << 16;
		break;

	case CS_TIMEOUT:
		cp->result = DID_BUS_BUSY << 16;

		if (IS_FWI2_CAPABLE(ha)) {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
			    "0x%x-0x%x\n", ha->host_no, cp->device->channel,
			    cp->device->id, cp->device->lun, comp_status,
			    scsi_status));
			break;
		}
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
		    "sflags=%x.\n", ha->host_no, cp->device->channel,
		    cp->device->id, cp->device->lun, comp_status, scsi_status,
		    le16_to_cpu(sts->status_flags)));

		/* Check to see if logout occurred. */
		if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
			qla2x00_mark_device_lost(ha, fcport, 1, 1);
		break;

	default:
		DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
		    "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status));
		qla_printk(KERN_INFO, ha,
		    "Unknown status detected 0x%x-0x%x.\n",
		    comp_status, scsi_status);

		cp->result = DID_ERROR << 16;
		break;
	}

	/* Place command on done queue. */
	if (ha->status_srb == NULL)
		qla2x00_sp_compl(ha, sp);
}
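
/*
 * When sense data does not fit in the status IOCB, qla2x00_status_entry()
 * leaves the srb on ha->status_srb; the remaining bytes then arrive in one
 * or more Status Continuation entries handled below, and the command is only
 * completed once request_sense_length drops to zero.
 */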
/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
{
	uint8_t sense_sz = 0;
	srb_t *sp = ha->status_srb;
	struct scsi_cmnd *cp;

	if (sp != NULL && sp->request_sense_length != 0) {
		cp = sp->cmd;
		if (cp == NULL) {
			DEBUG2(printk("%s(): Cmd already returned back to OS "
			    "sp=%p.\n", __func__, sp));
			qla_printk(KERN_INFO, ha,
			    "cmd is NULL: already returned to OS (sp=%p)\n",
			    sp);

			ha->status_srb = NULL;
			return;
		}

		if (sp->request_sense_length > sizeof(pkt->data)) {
			sense_sz = sizeof(pkt->data);
		} else {
			sense_sz = sp->request_sense_length;
		}

		/* Move sense data. */
		if (IS_FWI2_CAPABLE(ha))
			host_to_fcp_swap(pkt->data, sizeof(pkt->data));
		memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
		DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));

		sp->request_sense_ptr += sense_sz;
		sp->request_sense_length -= sense_sz;

		/* Place command on done queue. */
		if (sp->request_sense_length == 0) {
			ha->status_srb = NULL;
			qla2x00_sp_compl(ha, sp);
		}
	}
}
/**
 * qla2x00_error_entry() - Process an error entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
{
	srb_t *sp;

#if defined(QL_DEBUG_LEVEL_2)
	if (pkt->entry_status & RF_INV_E_ORDER)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
	else if (pkt->entry_status & RF_INV_E_COUNT)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
	else if (pkt->entry_status & RF_INV_E_PARAM)
		qla_printk(KERN_ERR, ha,
		    "%s: Invalid Entry Parameter\n", __func__);
	else if (pkt->entry_status & RF_INV_E_TYPE)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
	else if (pkt->entry_status & RF_BUSY)
		qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
	else
		qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
#endif

	/* Validate handle. */
	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle];
	else
		sp = NULL;

	if (sp) {
		/* Free outstanding command slot. */
		ha->outstanding_cmds[pkt->handle] = NULL;

		/* Bad payload or header */
		if (pkt->entry_status &
		    (RF_INV_E_ORDER | RF_INV_E_COUNT |
		     RF_INV_E_PARAM | RF_INV_E_TYPE)) {
			sp->cmd->result = DID_ERROR << 16;
		} else if (pkt->entry_status & RF_BUSY) {
			sp->cmd->result = DID_BUS_BUSY << 16;
		} else {
			sp->cmd->result = DID_ERROR << 16;
		}
		qla2x00_sp_compl(ha, sp);

	} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
	    COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
		DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Error entry - invalid handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		qla2xxx_wake_dpc(ha);
	}
}
/**
 * qla2x00_ms_entry() - Process a Management Server entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_ms_entry(scsi_qla_host_t *ha, ms_iocb_entry_t *pkt)
{
	srb_t *sp;

	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
	    __func__, ha->host_no, pkt, pkt->handle1));

	/* Validate handle. */
	if (pkt->handle1 < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle1];
	else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->status);
	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;

	/* Free outstanding command slot. */
	ha->outstanding_cmds[pkt->handle1] = NULL;

	qla2x00_sp_compl(ha, sp);
}
/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
{
	uint16_t cnt;
	uint16_t __iomem *wptr;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
		wptr++;
	}

	if (ha->mcp) {
		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
		    __func__, ha->host_no, ha->mcp->mb[0]));
	} else {
		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
		    __func__, ha->host_no));
	}
}
/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla24xx_process_response_queue(struct scsi_qla_host *ha)
{
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	struct sts_entry_24xx *pkt;

	if (!ha->flags.online)
		return;

	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)ha->response_ring_ptr;

		ha->rsp_ring_index++;
		if (ha->rsp_ring_index == ha->response_q_length) {
			ha->rsp_ring_index = 0;
			ha->response_ring_ptr = ha->response_ring;
		} else {
			ha->response_ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			DEBUG3(printk(KERN_INFO
			    "scsi(%ld): Process error entry.\n", ha->host_no));

			qla2x00_error_entry(ha, (sts_entry_t *) pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(ha, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
			break;
		case MS_IOCB_TYPE:
			qla24xx_ms_entry(ha, (struct ct_entry_24xx *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(ha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		default:
			/* Type Not Supported. */
			DEBUG4(printk(KERN_WARNING
			    "scsi(%ld): Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    ha->host_no, pkt->entry_type, pkt->entry_status));
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(&reg->rsp_q_out, ha->rsp_ring_index);
}
/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_24xx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return IRQ_NONE;
	}

	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			hccr = RD_REG_DWORD(&reg->hccr);

			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
			    "Dumping firmware!\n", hccr);
			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla24xx_process_response_queue(ha);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
	}

	return IRQ_HANDLED;
}
/**
 * qla24xx_ms_entry() - Process a Management Server entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla24xx_ms_entry(scsi_qla_host_t *ha, struct ct_entry_24xx *pkt)
{
	srb_t *sp;

	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
	    __func__, ha->host_no, pkt, pkt->handle));

	DEBUG9(printk("%s: ct pkt dump:\n", __func__));
	DEBUG9(qla2x00_dump_buffer((void *)pkt, sizeof(struct ct_entry_24xx)));

	/* Validate handle. */
	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle];
	else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		DEBUG10(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle %d\n",
		    pkt->handle);

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->comp_status);
	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;

	/* Free outstanding command slot. */
	ha->outstanding_cmds[pkt->handle] = NULL;

	qla2x00_sp_compl(ha, sp);
}
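
/*
 * MSI-X operation splits interrupt handling across two vectors: the default
 * vector (qla24xx_msix_default) handles mailbox completions and asynchronous
 * events much like qla24xx_intr_handler(), while the rsp_q vector
 * (qla24xx_msix_rsp_q) only drains the response queue.
 */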
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	ha = dev_id;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	qla24xx_process_response_queue(ha);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	RD_REG_DWORD_RELAXED(&reg->hccr);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_24xx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[4];

	ha = dev_id;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			hccr = RD_REG_DWORD(&reg->hccr);

			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
			    "Dumping firmware!\n", hccr);
			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla24xx_process_response_queue(ha);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
	}

	return IRQ_HANDLED;
}
/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	uint16_t entry;
	uint16_t index;
	const char *name;
	irq_handler_t handler;
};

static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
	{ QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
		"qla2xxx (default)", qla24xx_msix_default },

	{ QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
		"qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
};

static void
qla24xx_disable_msix(scsi_qla_host_t *ha)
{
	int i;
	struct qla_msix_entry *qentry;

	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
		qentry = &ha->msix_entries[imsix_entries[i].index];
		if (qentry->have_irq)
			free_irq(qentry->msix_vector, ha);
	}
	pci_disable_msix(ha->pdev);
}

static int
qla24xx_enable_msix(scsi_qla_host_t *ha)
{
	int i, ret;
	struct msix_entry entries[QLA_MSIX_ENTRIES];
	struct qla_msix_entry *qentry;

	for (i = 0; i < QLA_MSIX_ENTRIES; i++)
		entries[i].entry = imsix_entries[i].entry;

	ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries));
	if (ret) {
		qla_printk(KERN_WARNING, ha,
		    "MSI-X: Failed to enable support -- %d/%d\n",
		    QLA_MSIX_ENTRIES, ret);
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
		qentry = &ha->msix_entries[imsix_entries[i].index];
		qentry->msix_vector = entries[i].vector;
		qentry->msix_entry = entries[i].entry;
		qentry->have_irq = 0;
		ret = request_irq(qentry->msix_vector,
		    imsix_entries[i].handler, 0, imsix_entries[i].name, ha);
		if (ret) {
			qla_printk(KERN_WARNING, ha,
			    "MSI-X: Unable to register handler -- %x/%d.\n",
			    imsix_entries[i].index, ret);
			qla24xx_disable_msix(ha);
			goto msix_out;
		}
		qentry->have_irq = 1;
	}

msix_out:
	return ret;
}
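
/*
 * qla2x00_request_irqs() tries interrupt mechanisms in order of preference:
 * MSI-X on ISP2432/ISP2532 parts with suitable firmware, then plain MSI,
 * and finally a shared INTx line registered with the chip-specific handler
 * from ha->isp_ops->intr_handler.
 */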
int
qla2x00_request_irqs(scsi_qla_host_t *ha)
{
	int ret;

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha))
		goto skip_msix;

	if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
	    !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes));

		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha);
	if (!ret) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
		    ha->fw_attributes));
		return ret;
	}
	qla_printk(KERN_WARNING, ha,
	    "MSI-X: Falling back to INTa mode -- %d.\n", ret);
skip_msix:

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
		ha->flags.msi_enabled = 1;
	}
skip_msi:

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
	if (!ret) {
		ha->flags.inta_enabled = 1;
		ha->host->irq = ha->pdev->irq;
	} else {
		qla_printk(KERN_WARNING, ha,
		    "Failed to reserve interrupt %d already in use.\n",
		    ha->pdev->irq);
	}

	return ret;
}
void
qla2x00_free_irqs(scsi_qla_host_t *ha)
{

	if (ha->flags.msix_enabled)
		qla24xx_disable_msix(ha);
	else if (ha->flags.inta_enabled) {
		free_irq(ha->host->irq, ha);
		pci_disable_msi(ha->pdev);
	}
}