/* qlogicpti.c: Performance Technologies QlogicISP sbus card driver.
 *
 * Copyright (C) 1996, 2006 David S. Miller (davem@davemloft.net)
 *
 * A lot of this driver was directly stolen from Erik H. Moe's PCI
 * Qlogic ISP driver.  Mucho kudos to him for this code.
 *
 * An even bigger kudos to John Grana at Performance Technologies
 * for providing me with the hardware to write this driver, you rule
 * John you really do.
 *
 * May, 2, 1997: Added support for QLGC,isp --jj
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/jiffies.h>

#include <asm/byteorder.h>

#include "qlogicpti.h"
#include <asm/sbus.h>
#include <asm/dma.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#define MAX_TARGETS	16
#define MAX_LUNS	8	/* 32 for 1.31 F/W */

#define DEFAULT_LOOP_COUNT	10000

#include "qlogicpti_asm.c"
static struct qlogicpti *qptichain = NULL;
static DEFINE_SPINLOCK(qptichain_lock);
#define PACKB(a, b)	(((a)<<4)|(b))
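/* PACKB(a, b) describes a mailbox command: the high nibble is the number
 * of MBOX registers written when the command is issued, the low nibble
 * the number read back when it completes.  qlogicpti_mbox_command()
 * decodes these with ">> 4" and "& 0xf".  For example PACKB(5, 5)
 * (MBOX_LOAD_RAM) writes MBOX0-MBOX4 and reads MBOX0-MBOX4 back, while
 * PACKB(0, 0) marks an opcode this driver never issues.
 */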
static const u_char mbox_param[] = {
	PACKB(1, 1),	/* MBOX_NO_OP */
	PACKB(5, 5),	/* MBOX_LOAD_RAM */
	PACKB(2, 0),	/* MBOX_EXEC_FIRMWARE */
	PACKB(5, 5),	/* MBOX_DUMP_RAM */
	PACKB(3, 3),	/* MBOX_WRITE_RAM_WORD */
	PACKB(2, 3),	/* MBOX_READ_RAM_WORD */
	PACKB(6, 6),	/* MBOX_MAILBOX_REG_TEST */
	PACKB(2, 3),	/* MBOX_VERIFY_CHECKSUM */
	PACKB(1, 3),	/* MBOX_ABOUT_FIRMWARE */
	PACKB(0, 0),	/* 0x0009 */
	PACKB(0, 0),	/* 0x000a */
	PACKB(0, 0),	/* 0x000b */
	PACKB(0, 0),	/* 0x000c */
	PACKB(0, 0),	/* 0x000d */
	PACKB(1, 2),	/* MBOX_CHECK_FIRMWARE */
	PACKB(0, 0),	/* 0x000f */
	PACKB(5, 5),	/* MBOX_INIT_REQ_QUEUE */
	PACKB(6, 6),	/* MBOX_INIT_RES_QUEUE */
	PACKB(4, 4),	/* MBOX_EXECUTE_IOCB */
	PACKB(2, 2),	/* MBOX_WAKE_UP */
	PACKB(1, 6),	/* MBOX_STOP_FIRMWARE */
	PACKB(4, 4),	/* MBOX_ABORT */
	PACKB(2, 2),	/* MBOX_ABORT_DEVICE */
	PACKB(3, 3),	/* MBOX_ABORT_TARGET */
	PACKB(2, 2),	/* MBOX_BUS_RESET */
	PACKB(2, 3),	/* MBOX_STOP_QUEUE */
	PACKB(2, 3),	/* MBOX_START_QUEUE */
	PACKB(2, 3),	/* MBOX_SINGLE_STEP_QUEUE */
	PACKB(2, 3),	/* MBOX_ABORT_QUEUE */
	PACKB(2, 4),	/* MBOX_GET_DEV_QUEUE_STATUS */
	PACKB(0, 0),	/* 0x001e */
	PACKB(1, 3),	/* MBOX_GET_FIRMWARE_STATUS */
	PACKB(1, 2),	/* MBOX_GET_INIT_SCSI_ID */
	PACKB(1, 2),	/* MBOX_GET_SELECT_TIMEOUT */
	PACKB(1, 3),	/* MBOX_GET_RETRY_COUNT */
	PACKB(1, 2),	/* MBOX_GET_TAG_AGE_LIMIT */
	PACKB(1, 2),	/* MBOX_GET_CLOCK_RATE */
	PACKB(1, 2),	/* MBOX_GET_ACT_NEG_STATE */
	PACKB(1, 2),	/* MBOX_GET_ASYNC_DATA_SETUP_TIME */
	PACKB(1, 3),	/* MBOX_GET_SBUS_PARAMS */
	PACKB(2, 4),	/* MBOX_GET_TARGET_PARAMS */
	PACKB(2, 4),	/* MBOX_GET_DEV_QUEUE_PARAMS */
	PACKB(0, 0),	/* 0x002a */
	PACKB(0, 0),	/* 0x002b */
	PACKB(0, 0),	/* 0x002c */
	PACKB(0, 0),	/* 0x002d */
	PACKB(0, 0),	/* 0x002e */
	PACKB(0, 0),	/* 0x002f */
	PACKB(2, 2),	/* MBOX_SET_INIT_SCSI_ID */
	PACKB(2, 2),	/* MBOX_SET_SELECT_TIMEOUT */
	PACKB(3, 3),	/* MBOX_SET_RETRY_COUNT */
	PACKB(2, 2),	/* MBOX_SET_TAG_AGE_LIMIT */
	PACKB(2, 2),	/* MBOX_SET_CLOCK_RATE */
	PACKB(2, 2),	/* MBOX_SET_ACTIVE_NEG_STATE */
	PACKB(2, 2),	/* MBOX_SET_ASYNC_DATA_SETUP_TIME */
	PACKB(3, 3),	/* MBOX_SET_SBUS_CONTROL_PARAMS */
	PACKB(4, 4),	/* MBOX_SET_TARGET_PARAMS */
	PACKB(4, 4),	/* MBOX_SET_DEV_QUEUE_PARAMS */
	PACKB(0, 0),	/* 0x003a */
	PACKB(0, 0),	/* 0x003b */
	PACKB(0, 0),	/* 0x003c */
	PACKB(0, 0),	/* 0x003d */
	PACKB(0, 0),	/* 0x003e */
	PACKB(0, 0),	/* 0x003f */
	PACKB(0, 0),	/* 0x0040 */
	PACKB(0, 0),	/* 0x0041 */
	PACKB(0, 0)	/* 0x0042 */
};
#define MAX_MBOX_COMMAND	ARRAY_SIZE(mbox_param)
/* queue lengths _must_ be powers of two: */
#define QUEUE_DEPTH(in, out, ql)	((in - out) & (ql))
#define REQ_QUEUE_DEPTH(in, out)	QUEUE_DEPTH(in, out, QLOGICPTI_REQ_QUEUE_LEN)
#define RES_QUEUE_DEPTH(in, out)	QUEUE_DEPTH(in, out, RES_QUEUE_LEN)
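/* The ring pointers are free-running indices; masking their difference
 * with the queue length (a power-of-two minus one) yields the number of
 * occupied slots even after the "in" index wraps.  For example, with a
 * mask of 0xff (a 256-entry ring), in = 3 and out = 250 gives
 * (3 - 250) & 0xff = 9 entries in flight.
 */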
static inline void qlogicpti_enable_irqs(struct qlogicpti *qpti)
{
	sbus_writew(SBUS_CTRL_ERIRQ | SBUS_CTRL_GENAB,
		    qpti->qregs + SBUS_CTRL);
}

static inline void qlogicpti_disable_irqs(struct qlogicpti *qpti)
{
	sbus_writew(0, qpti->qregs + SBUS_CTRL);
}
static inline void set_sbus_cfg1(struct qlogicpti *qpti)
{
	u16 val;
	u8 bursts = qpti->bursts;

#if 0	/* It appears that at least PTI cards do not support
	 * 64-byte bursts and that setting the B64 bit actually
	 * is a nop and the chip ends up using the smallest burst
	 * size.
	 */
	if (sbus_can_burst64(qpti->sdev) && (bursts & DMA_BURST64)) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64);
	} else
#endif
	if (bursts & DMA_BURST32) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B32);
	} else if (bursts & DMA_BURST16) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B16);
	} else if (bursts & DMA_BURST8) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B8);
	} else {
		val = 0; /* No sbus bursts for you... */
	}
	sbus_writew(val, qpti->qregs + SBUS_CFG1);
}
static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int force)
{
	int loop_count;
	u16 tmp;

	if (mbox_param[param[0]] == 0)
		return 1;

	/* Set SBUS semaphore. */
	tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
	tmp |= SBUS_SEMAPHORE_LCK;
	sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);

	/* Wait for host IRQ bit to clear. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_HIRQ)) {
		barrier();
		cpu_relax();
	}
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: mbox_command loop timeout #1\n",
		       qpti->qpti_id);

	/* Write mailbox command registers. */
	switch (mbox_param[param[0]] >> 4) {
	case 6: sbus_writew(param[5], qpti->qregs + MBOX5);
	case 5: sbus_writew(param[4], qpti->qregs + MBOX4);
	case 4: sbus_writew(param[3], qpti->qregs + MBOX3);
	case 3: sbus_writew(param[2], qpti->qregs + MBOX2);
	case 2: sbus_writew(param[1], qpti->qregs + MBOX1);
	case 1: sbus_writew(param[0], qpti->qregs + MBOX0);
	}

	/* Clear RISC interrupt. */
	tmp = sbus_readw(qpti->qregs + HCCTRL);
	tmp |= HCCTRL_CRIRQ;
	sbus_writew(tmp, qpti->qregs + HCCTRL);

	/* Clear SBUS semaphore. */
	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);

	/* Set HOST interrupt. */
	tmp = sbus_readw(qpti->qregs + HCCTRL);
	tmp |= HCCTRL_SHIRQ;
	sbus_writew(tmp, qpti->qregs + HCCTRL);

	/* Wait for HOST interrupt clears. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count &&
	       (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_CRIRQ))
		udelay(20);
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #2\n",
		       qpti->qpti_id, param[0]);

	/* Wait for SBUS semaphore to get set. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count &&
	       !(sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK)) {
		udelay(20);

		/* Workaround for some buggy chips. */
		if (sbus_readw(qpti->qregs + MBOX0) & 0x4000)
			break;
	}
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #3\n",
		       qpti->qpti_id, param[0]);

	/* Wait for MBOX busy condition to go away. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && (sbus_readw(qpti->qregs + MBOX0) == 0x04))
		udelay(20);
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #4\n",
		       qpti->qpti_id, param[0]);

	/* Read back output parameters. */
	switch (mbox_param[param[0]] & 0xf) {
	case 6: param[5] = sbus_readw(qpti->qregs + MBOX5);
	case 5: param[4] = sbus_readw(qpti->qregs + MBOX4);
	case 4: param[3] = sbus_readw(qpti->qregs + MBOX3);
	case 3: param[2] = sbus_readw(qpti->qregs + MBOX2);
	case 2: param[1] = sbus_readw(qpti->qregs + MBOX1);
	case 1: param[0] = sbus_readw(qpti->qregs + MBOX0);
	}

	/* Clear RISC interrupt. */
	tmp = sbus_readw(qpti->qregs + HCCTRL);
	tmp |= HCCTRL_CRIRQ;
	sbus_writew(tmp, qpti->qregs + HCCTRL);

	/* Release SBUS semaphore. */
	tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
	tmp &= ~(SBUS_SEMAPHORE_LCK);
	sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);

	return 0;
}
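/* Illustrative sketch only (never compiled): a typical caller loads the
 * input registers into param[], issues the command, and checks that
 * param[0] came back as MBOX_COMMAND_COMPLETE, exactly as the firmware
 * load and hardware reset paths below do.
 */
#if 0
static int example_read_firmware_version(struct qlogicpti *qpti)
{
	u_short param[6];

	param[0] = MBOX_ABOUT_FIRMWARE;	/* one register in, three back: PACKB(1, 3) */
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    param[0] != MBOX_COMMAND_COMPLETE)
		return -1;

	/* param[1]/param[2]/param[3] now hold the major/minor/micro revision. */
	return (param[1] << 8) | param[2];
}
#endif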
static inline void qlogicpti_set_hostdev_defaults(struct qlogicpti *qpti)
{
	int i;

	qpti->host_param.initiator_scsi_id = qpti->scsi_id;
	qpti->host_param.bus_reset_delay = 3;
	qpti->host_param.retry_count = 0;
	qpti->host_param.retry_delay = 5;
	qpti->host_param.async_data_setup_time = 3;
	qpti->host_param.req_ack_active_negation = 1;
	qpti->host_param.data_line_active_negation = 1;
	qpti->host_param.data_dma_burst_enable = 1;
	qpti->host_param.command_dma_burst_enable = 1;
	qpti->host_param.tag_aging = 8;
	qpti->host_param.selection_timeout = 250;
	qpti->host_param.max_queue_depth = 256;

	for(i = 0; i < MAX_TARGETS; i++) {
		/*
		 * disconnect, parity, arq, reneg on reset, and, oddly enough
		 * tags...the midlayer's notion of tagged support has to match
		 * our device settings, and since we base whether we enable a
		 * tag on a per-cmnd basis upon what the midlayer sez, we
		 * actually enable the capability here.
		 */
		qpti->dev_param[i].device_flags = 0xcd;
		qpti->dev_param[i].execution_throttle = 16;
		if (qpti->ultra) {
			qpti->dev_param[i].synchronous_period = 12;
			qpti->dev_param[i].synchronous_offset = 8;
		} else {
			qpti->dev_param[i].synchronous_period = 25;
			qpti->dev_param[i].synchronous_offset = 12;
		}
		qpti->dev_param[i].device_enable = 1;
	}
	/* this is very important to set! */
	qpti->sbits = 1 << qpti->scsi_id;
}
static int qlogicpti_reset_hardware(struct Scsi_Host *host)
{
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	u_short param[6];
	unsigned short risc_code_addr;
	int loop_count, i;
	unsigned long flags;

	risc_code_addr = 0x1000;	/* all load addresses are at 0x1000 */

	spin_lock_irqsave(host->host_lock, flags);

	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);

	/* Only reset the scsi bus if it is not free. */
	if (sbus_readw(qpti->qregs + CPU_PCTRL) & CPU_PCTRL_BSY) {
		sbus_writew(CPU_ORIDE_RMOD, qpti->qregs + CPU_ORIDE);
		sbus_writew(CPU_CMD_BRESET, qpti->qregs + CPU_CMD);
		udelay(400);
	}

	sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);

	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && ((sbus_readw(qpti->qregs + MBOX0) & 0xff) == 0x04))
		udelay(20);
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: reset_hardware loop timeout\n",
		       qpti->qpti_id);

	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
	set_sbus_cfg1(qpti);
	qlogicpti_enable_irqs(qpti);

	if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
		qpti->ultra = 1;
		sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
			    qpti->qregs + RISC_MTREG);
	} else {
		qpti->ultra = 0;
		sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
			    qpti->qregs + RISC_MTREG);
	}

	/* reset adapter and per-device default values. */
	/* do it after finding out whether we're ultra mode capable */
	qlogicpti_set_hostdev_defaults(qpti);

	/* Release the RISC processor. */
	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* Get RISC to start executing the firmware code. */
	param[0] = MBOX_EXEC_FIRMWARE;
	param[1] = risc_code_addr;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot execute ISP firmware.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Set initiator scsi ID. */
	param[0] = MBOX_SET_INIT_SCSI_ID;
	param[1] = qpti->host_param.initiator_scsi_id;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot set initiator SCSI ID.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Initialize state of the queues, both hw and sw. */
	qpti->req_in_ptr = qpti->res_out_ptr = 0;

	param[0] = MBOX_INIT_RES_QUEUE;
	param[1] = RES_QUEUE_LEN + 1;
	param[2] = (u_short) (qpti->res_dvma >> 16);
	param[3] = (u_short) (qpti->res_dvma & 0xffff);
	param[4] = param[5] = 0;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot init response queue.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	param[0] = MBOX_INIT_REQ_QUEUE;
	param[1] = QLOGICPTI_REQ_QUEUE_LEN + 1;
	param[2] = (u_short) (qpti->req_dvma >> 16);
	param[3] = (u_short) (qpti->req_dvma & 0xffff);
	param[4] = param[5] = 0;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot init request queue.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	param[0] = MBOX_SET_RETRY_COUNT;
	param[1] = qpti->host_param.retry_count;
	param[2] = qpti->host_param.retry_delay;
	qlogicpti_mbox_command(qpti, param, 0);

	param[0] = MBOX_SET_TAG_AGE_LIMIT;
	param[1] = qpti->host_param.tag_aging;
	qlogicpti_mbox_command(qpti, param, 0);

	for (i = 0; i < MAX_TARGETS; i++) {
		param[0] = MBOX_GET_DEV_QUEUE_PARAMS;
		param[1] = (i << 8);
		qlogicpti_mbox_command(qpti, param, 0);
	}

	param[0] = MBOX_GET_FIRMWARE_STATUS;
	qlogicpti_mbox_command(qpti, param, 0);

	param[0] = MBOX_SET_SELECT_TIMEOUT;
	param[1] = qpti->host_param.selection_timeout;
	qlogicpti_mbox_command(qpti, param, 0);

	for (i = 0; i < MAX_TARGETS; i++) {
		param[0] = MBOX_SET_TARGET_PARAMS;
		param[1] = (i << 8);
		param[2] = (qpti->dev_param[i].device_flags << 8);
		/*
		 * Since we're now loading 1.31 f/w, force narrow/async.
		 */
		param[3] = 0;	/* no offset, we do not have sync mode yet */
		qlogicpti_mbox_command(qpti, param, 0);
	}

	/*
	 * Always (sigh) do an initial bus reset (kicks f/w).
	 */
	param[0] = MBOX_BUS_RESET;
	param[1] = qpti->host_param.bus_reset_delay;
	qlogicpti_mbox_command(qpti, param, 0);
	qpti->send_marker = 1;

	spin_unlock_irqrestore(host->host_lock, flags);
	return 0;
}
#define PTI_RESET_LIMIT 400

static int __devinit qlogicpti_load_firmware(struct qlogicpti *qpti)
{
	struct Scsi_Host *host = qpti->qhost;
	unsigned short csum = 0;
	unsigned short param[6];
	unsigned short *risc_code, risc_code_addr, risc_code_length;
	unsigned long flags;
	int i, timeout;

	risc_code = &sbus_risc_code01[0];
	risc_code_addr = 0x1000;	/* all f/w modules load at 0x1000 */
	risc_code_length = sbus_risc_code_length01;

	spin_lock_irqsave(host->host_lock, flags);

	/* Verify the checksum twice, one before loading it, and once
	 * afterwards via the mailbox commands.
	 */
	for (i = 0; i < risc_code_length; i++)
		csum += risc_code[i];
	if (csum) {
		spin_unlock_irqrestore(host->host_lock, flags);
		printk(KERN_EMERG "qlogicpti%d: Aieee, firmware checksum failed!",
		       qpti->qpti_id);
		return 1;
	}

	sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);
	timeout = PTI_RESET_LIMIT;
	while (--timeout && (sbus_readw(qpti->qregs + SBUS_CTRL) & SBUS_CTRL_RESET))
		udelay(20);
	if (!timeout) {
		spin_unlock_irqrestore(host->host_lock, flags);
		printk(KERN_EMERG "qlogicpti%d: Cannot reset the ISP.", qpti->qpti_id);
		return 1;
	}

	sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);

	sbus_writew((SBUS_CTRL_GENAB | SBUS_CTRL_ERIRQ), qpti->qregs + SBUS_CTRL);
	set_sbus_cfg1(qpti);
	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);

	if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
		qpti->ultra = 1;
		sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
			    qpti->qregs + RISC_MTREG);
	} else {
		qpti->ultra = 0;
		sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
			    qpti->qregs + RISC_MTREG);
	}

	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* Pin lines are only stable while RISC is paused. */
	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
	if (sbus_readw(qpti->qregs + CPU_PDIFF) & CPU_PDIFF_MODE)
		qpti->differential = 1;
	else
		qpti->differential = 0;
	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* This shouldn't be necessary- we've reset things so we should be
	   running from the ROM now.. */

	param[0] = MBOX_STOP_FIRMWARE;
	param[1] = param[2] = param[3] = param[4] = param[5] = 0;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot stop firmware for reload.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	for (i = 0; i < risc_code_length; i++) {
		param[0] = MBOX_WRITE_RAM_WORD;
		param[1] = risc_code_addr + i;
		param[2] = risc_code[i];
		if (qlogicpti_mbox_command(qpti, param, 1) ||
		    param[0] != MBOX_COMMAND_COMPLETE) {
			printk("qlogicpti%d: Firmware dload failed, I'm bolixed!\n",
			       qpti->qpti_id);
			spin_unlock_irqrestore(host->host_lock, flags);
			return 1;
		}
	}

	/* Reset the ISP again. */
	sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);

	qlogicpti_enable_irqs(qpti);
	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* Ask ISP to verify the checksum of the new code. */
	param[0] = MBOX_VERIFY_CHECKSUM;
	param[1] = risc_code_addr;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: New firmware csum failure!\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Start using newly downloaded firmware. */
	param[0] = MBOX_EXEC_FIRMWARE;
	param[1] = risc_code_addr;
	qlogicpti_mbox_command(qpti, param, 1);

	param[0] = MBOX_ABOUT_FIRMWARE;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: AboutFirmware cmd fails.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Snag the major and minor revisions from the result. */
	qpti->fware_majrev = param[1];
	qpti->fware_minrev = param[2];
	qpti->fware_micrev = param[3];

	/* Set the clock rate */
	param[0] = MBOX_SET_CLOCK_RATE;
	param[1] = qpti->clock;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: could not set clock rate.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	if (qpti->is_pti != 0) {
		/* Load scsi initiator ID and interrupt level into sbus static ram. */
		param[0] = MBOX_WRITE_RAM_WORD;
		param[2] = (unsigned short) qpti->scsi_id;
		qlogicpti_mbox_command(qpti, param, 1);

		param[0] = MBOX_WRITE_RAM_WORD;
		param[2] = (unsigned short) 3;
		qlogicpti_mbox_command(qpti, param, 1);
	}

	spin_unlock_irqrestore(host->host_lock, flags);
	return 0;
}
static int qlogicpti_verify_tmon(struct qlogicpti *qpti)
{
	int curstat = sbus_readb(qpti->sreg);

	curstat &= 0xf0;
	if (!(curstat & SREG_FUSE) && (qpti->swsreg & SREG_FUSE))
		printk("qlogicpti%d: Fuse returned to normal state.\n", qpti->qpti_id);
	if (!(curstat & SREG_TPOWER) && (qpti->swsreg & SREG_TPOWER))
		printk("qlogicpti%d: termpwr back to normal state.\n", qpti->qpti_id);
	if (curstat != qpti->swsreg) {
		int error = 0;

		if (curstat & SREG_FUSE) {
			error++;
			printk("qlogicpti%d: Fuse is open!\n", qpti->qpti_id);
		}
		if (curstat & SREG_TPOWER) {
			error++;
			printk("qlogicpti%d: termpwr failure\n", qpti->qpti_id);
		}
		if (qpti->differential &&
		    (curstat & SREG_DSENSE) != SREG_DSENSE) {
			error++;
			printk("qlogicpti%d: You have a single ended device on a "
			       "differential bus!  Please fix!\n", qpti->qpti_id);
		}
		qpti->swsreg = curstat;
		return error;
	}
	return 0;
}
static irqreturn_t qpti_intr(int irq, void *dev_id);
static void __init qpti_chain_add(struct qlogicpti *qpti)
{
	spin_lock_irq(&qptichain_lock);
	if (qptichain != NULL) {
		struct qlogicpti *qlink = qptichain;

		while (qlink->next)
			qlink = qlink->next;
		qlink->next = qpti;
	} else {
		qptichain = qpti;
	}
	qpti->next = NULL;
	spin_unlock_irq(&qptichain_lock);
}

static void __init qpti_chain_del(struct qlogicpti *qpti)
{
	spin_lock_irq(&qptichain_lock);
	if (qptichain == qpti) {
		qptichain = qpti->next;
	} else {
		struct qlogicpti *qlink = qptichain;

		while (qlink->next != qpti)
			qlink = qlink->next;
		qlink->next = qpti->next;
	}
	qpti->next = NULL;
	spin_unlock_irq(&qptichain_lock);
}
static int __init qpti_map_regs(struct qlogicpti *qpti)
{
	struct sbus_dev *sdev = qpti->sdev;

	qpti->qregs = sbus_ioremap(&sdev->resource[0], 0,
				   sdev->reg_addrs[0].reg_size,
				   "PTI Qlogic/ISP");
	if (!qpti->qregs) {
		printk("PTI: Qlogic/ISP registers are unmappable\n");
		return -1;
	}
	if (qpti->is_pti) {
		qpti->sreg = sbus_ioremap(&sdev->resource[0], (16 * 4096),
					  sizeof(unsigned char),
					  "PTI Qlogic/ISP statreg");
		if (!qpti->sreg) {
			printk("PTI: Qlogic/ISP status register is unmappable\n");
			return -1;
		}
	}
	return 0;
}
static int __init qpti_register_irq(struct qlogicpti *qpti)
{
	struct sbus_dev *sdev = qpti->sdev;

	qpti->qhost->irq = qpti->irq = sdev->irqs[0];

	/* We used to try various overly-clever things to
	 * reduce the interrupt processing overhead on
	 * sun4c/sun4m when multiple PTI's shared the
	 * same IRQ.  It was too complex and messy to
	 * sanely maintain.
	 */
	if (request_irq(qpti->irq, qpti_intr,
			IRQF_SHARED, "Qlogic/PTI", qpti))
		goto fail;

	printk("qlogicpti%d: IRQ %d ", qpti->qpti_id, qpti->irq);

	return 0;

fail:
	printk("qlogicpti%d: Cannot acquire irq line\n", qpti->qpti_id);
	return -1;
}
static void __init qpti_get_scsi_id(struct qlogicpti *qpti)
{
	qpti->scsi_id = prom_getintdefault(qpti->prom_node,
					   "initiator-id",
					   -1);
	if (qpti->scsi_id == -1)
		qpti->scsi_id = prom_getintdefault(qpti->prom_node,
						   "scsi-initiator-id",
						   -1);
	if (qpti->scsi_id == -1)
		qpti->scsi_id =
			prom_getintdefault(qpti->sdev->bus->prom_node,
					   "scsi-initiator-id", 7);
	qpti->qhost->this_id = qpti->scsi_id;
	qpti->qhost->max_sectors = 64;

	printk("SCSI ID %d ", qpti->scsi_id);
}
static void qpti_get_bursts(struct qlogicpti *qpti)
{
	struct sbus_dev *sdev = qpti->sdev;
	u8 bursts, bmask;

	bursts = prom_getintdefault(qpti->prom_node, "burst-sizes", 0xff);
	bmask = prom_getintdefault(sdev->bus->prom_node,
				   "burst-sizes", 0xff);
	if (bmask != 0xff)
		bursts &= bmask;
	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	qpti->bursts = bursts;
}
static void qpti_get_clock(struct qlogicpti *qpti)
{
	unsigned int cfreq;

	/* Check for what the clock input to this card is.
	 * Default to 40MHz.
	 */
	cfreq = prom_getintdefault(qpti->prom_node, "clock-frequency", 40000000);
	qpti->clock = (cfreq + 500000) / 1000000;
	if (qpti->clock == 0) /* bullshit */
		qpti->clock = 40;
}
/* The request and response queues must each be aligned
 * on a page boundary.
 */
static int __init qpti_map_queues(struct qlogicpti *qpti)
{
	struct sbus_dev *sdev = qpti->sdev;

#define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
	qpti->res_cpu = sbus_alloc_consistent(sdev,
					      QSIZE(RES_QUEUE_LEN),
					      &qpti->res_dvma);
	if (qpti->res_cpu == NULL ||
	    qpti->res_dvma == 0) {
		printk("QPTI: Cannot map response queue.\n");
		return -1;
	}

	qpti->req_cpu = sbus_alloc_consistent(sdev,
					      QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
					      &qpti->req_dvma);
	if (qpti->req_cpu == NULL ||
	    qpti->req_dvma == 0) {
		sbus_free_consistent(sdev, QSIZE(RES_QUEUE_LEN),
				     qpti->res_cpu, qpti->res_dvma);
		printk("QPTI: Cannot map request queue.\n");
		return -1;
	}
	memset(qpti->res_cpu, 0, QSIZE(RES_QUEUE_LEN));
	memset(qpti->req_cpu, 0, QSIZE(QLOGICPTI_REQ_QUEUE_LEN));
	return 0;
}
const char *qlogicpti_info(struct Scsi_Host *host)
{
	static char buf[80];
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;

	sprintf(buf, "PTI Qlogic,ISP SBUS SCSI irq %d regs at %p",
		qpti->qhost->irq, qpti->qregs);
	return buf;
}
/* I am a certified frobtronicist. */
static inline void marker_frob(struct Command_Entry *cmd)
{
	struct Marker_Entry *marker = (struct Marker_Entry *) cmd;

	memset(marker, 0, sizeof(struct Marker_Entry));
	marker->hdr.entry_cnt = 1;
	marker->hdr.entry_type = ENTRY_MARKER;
	marker->modifier = SYNC_ALL;
}
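/* A marker IOCB tells the firmware to resynchronize after a bus reset:
 * qpti->send_marker is set whenever a reset is seen (the initial
 * MBOX_BUS_RESET, the eh bus-reset handler, async reset events in the
 * interrupt handler), and the next qlogicpti_queuecommand() invocation
 * spends one request-ring slot on a SYNC_ALL marker before queueing
 * real work again.
 */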
static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
			    struct qlogicpti *qpti)
{
	memset(cmd, 0, sizeof(struct Command_Entry));
	cmd->hdr.entry_cnt = 1;
	cmd->hdr.entry_type = ENTRY_COMMAND;
	cmd->target_id = Cmnd->device->id;
	cmd->target_lun = Cmnd->device->lun;
	cmd->cdb_length = Cmnd->cmd_len;
	cmd->control_flags = 0;
	if (Cmnd->device->tagged_supported) {
		if (qpti->cmd_count[Cmnd->device->id] == 0)
			qpti->tag_ages[Cmnd->device->id] = jiffies;
		if (time_after(jiffies, qpti->tag_ages[Cmnd->device->id] + (5*HZ))) {
			cmd->control_flags = CFLAG_ORDERED_TAG;
			qpti->tag_ages[Cmnd->device->id] = jiffies;
		} else
			cmd->control_flags = CFLAG_SIMPLE_TAG;
	}
	if ((Cmnd->cmnd[0] == WRITE_6) ||
	    (Cmnd->cmnd[0] == WRITE_10) ||
	    (Cmnd->cmnd[0] == WRITE_12))
		cmd->control_flags |= CFLAG_WRITE;
	else
		cmd->control_flags |= CFLAG_READ;
	memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
}
/* Do it to it baby. */
static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
			   struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr)
{
	struct dataseg *ds;
	struct scatterlist *sg;
	int i, n;

	if (Cmnd->use_sg) {
		int sg_count;

		sg = (struct scatterlist *) Cmnd->request_buffer;
		sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction);

		ds = cmd->dataseg;
		cmd->segment_cnt = sg_count;

		/* Fill in first four sg entries: */
		n = sg_count;
		if (n > 4)
			n = 4;
		for (i = 0; i < n; i++, sg++) {
			ds[i].d_base = sg_dma_address(sg);
			ds[i].d_count = sg_dma_len(sg);
		}
		sg_count -= 4;
		while (sg_count > 0) {
			struct Continuation_Entry *cont;

			++cmd->hdr.entry_cnt;
			cont = (struct Continuation_Entry *) &qpti->req_cpu[in_ptr];
			in_ptr = NEXT_REQ_PTR(in_ptr);
			if (in_ptr == out_ptr)
				return -1;

			cont->hdr.entry_type = ENTRY_CONTINUATION;
			cont->hdr.entry_cnt = 0;
			cont->hdr.sys_def_1 = 0;
			cont->hdr.flags = 0;
			cont->reserved = 0;
			ds = cont->dataseg;
			n = sg_count;
			if (n > 7)
				n = 7;
			for (i = 0; i < n; i++, sg++) {
				ds[i].d_base = sg_dma_address(sg);
				ds[i].d_count = sg_dma_len(sg);
			}
			sg_count -= n;
		}
	} else if (Cmnd->request_bufflen) {
		Cmnd->SCp.ptr = (char *)(unsigned long)
			sbus_map_single(qpti->sdev,
					Cmnd->request_buffer,
					Cmnd->request_bufflen,
					Cmnd->sc_data_direction);

		cmd->dataseg[0].d_base = (u32) ((unsigned long)Cmnd->SCp.ptr);
		cmd->dataseg[0].d_count = Cmnd->request_bufflen;
		cmd->segment_cnt = 1;
	} else {
		cmd->dataseg[0].d_base = 0;
		cmd->dataseg[0].d_count = 0;
		cmd->segment_cnt = 1; /* Shouldn't this be 0? */
	}

	/* Committed, record Scsi_Cmd so we can find it later. */
	cmd->handle = in_ptr;
	qpti->cmd_slots[in_ptr] = Cmnd;

	qpti->cmd_count[Cmnd->device->id]++;
	sbus_writew(in_ptr, qpti->qregs + MBOX4);
	qpti->req_in_ptr = in_ptr;

	return in_ptr;
}
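/* Scatter-gather layout used above: the Command_Entry itself carries the
 * first four data segments; each additional group of up to seven
 * segments consumes one Continuation_Entry, i.e. one more slot of the
 * request ring, which is why load_cmd() re-checks in_ptr against
 * out_ptr for every continuation and bails out with -1 on a full ring.
 */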
static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int out_ptr)
{
	/* Temporary workaround until bug is found and fixed (one bug has been found
	   already, but fixing it makes things even worse) -jj */
	int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64;
	host->can_queue = host->host_busy + num_free;
	host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
}
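/* Example of the heuristic above: if the request ring currently holds 20
 * entries and 5 commands are active in the midlayer, can_queue becomes
 * 5 + (QLOGICPTI_REQ_QUEUE_LEN - 20 - 64), so the midlayer is throttled
 * well before the ring (and the continuation entries each command may
 * need) can overflow.
 */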
static unsigned int scsi_rbuf_get(struct scsi_cmnd *cmd, unsigned char **buf_out)
{
	unsigned char *buf;
	unsigned int buflen;

	if (cmd->use_sg) {
		struct scatterlist *sg;

		sg = (struct scatterlist *) cmd->request_buffer;
		buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
		buflen = sg->length;
	} else {
		buf = cmd->request_buffer;
		buflen = cmd->request_bufflen;
	}

	*buf_out = buf;
	return buflen;
}

static void scsi_rbuf_put(struct scsi_cmnd *cmd, unsigned char *buf)
{
	if (cmd->use_sg) {
		struct scatterlist *sg;

		sg = (struct scatterlist *) cmd->request_buffer;
		kunmap_atomic(buf - sg->offset, KM_IRQ0);
	}
}
/*
 * Until we scan the entire bus with inquiries, go through this fella...
 */
static void ourdone(struct scsi_cmnd *Cmnd)
{
	struct qlogicpti *qpti = (struct qlogicpti *) Cmnd->device->host->hostdata;
	int tgt = Cmnd->device->id;
	void (*done) (struct scsi_cmnd *);

	/* This grot added by DaveM, blame him for ugliness.
	 * The issue is that in the 2.3.x driver we use the
	 * host_scribble portion of the scsi command as a
	 * completion linked list at interrupt service time,
	 * so we have to store the done function pointer elsewhere.
	 */
	done = (void (*)(struct scsi_cmnd *))
		(((unsigned long) Cmnd->SCp.Message)
#ifdef CONFIG_SPARC64
		 | ((unsigned long) Cmnd->SCp.Status << 32UL)
#endif
		);

	if ((qpti->sbits & (1 << tgt)) == 0) {
		int ok = host_byte(Cmnd->result) == DID_OK;
		if (Cmnd->cmnd[0] == 0x12 && ok) {
			unsigned char *iqd;
			unsigned int iqd_len;

			iqd_len = scsi_rbuf_get(Cmnd, &iqd);

			/* tags handled in midlayer */
			/* enable sync mode? */
			if (iqd[7] & 0x10) {
				qpti->dev_param[tgt].device_flags |= 0x10;
			} else {
				qpti->dev_param[tgt].synchronous_offset = 0;
				qpti->dev_param[tgt].synchronous_period = 0;
			}
			/* are we wide capable? */
			if (iqd[7] & 0x20) {
				qpti->dev_param[tgt].device_flags |= 0x20;
			}

			scsi_rbuf_put(Cmnd, iqd);

			qpti->sbits |= (1 << tgt);
		} else if (!ok) {
			qpti->sbits |= (1 << tgt);
		}
	}
	done(Cmnd);
}
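/* ourdone() and qlogicpti_queuecommand_slow() form a pair: since
 * host_scribble is reused as the completion list at interrupt time, the
 * real completion callback is smuggled through SCp.Message (low 32 bits)
 * and, on sparc64, SCp.Status (high 32 bits), then reassembled above
 * before being invoked.
 */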
static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *));

static int qlogicpti_queuecommand_slow(struct scsi_cmnd *Cmnd,
				       void (*done)(struct scsi_cmnd *))
{
	struct qlogicpti *qpti = (struct qlogicpti *) Cmnd->device->host->hostdata;

	/*
	 * done checking this host adapter?
	 * If not, then rewrite the command
	 * to finish through ourdone so we
	 * can peek at Inquiry data results.
	 */
	if (qpti->sbits && qpti->sbits != 0xffff) {
		/* See above about in ourdone this ugliness... */
		Cmnd->SCp.Message = ((unsigned long)done) & 0xffffffff;
#ifdef CONFIG_SPARC64
		Cmnd->SCp.Status = ((unsigned long)done >> 32UL) & 0xffffffff;
#endif
		return qlogicpti_queuecommand(Cmnd, ourdone);
	}

	/*
	 * We've peeked at all targets for this bus- time
	 * to set parameters for devices for real now.
	 */
	if (qpti->sbits == 0xffff) {
		int i;
		for(i = 0; i < MAX_TARGETS; i++) {
			u_short param[6];
			param[0] = MBOX_SET_TARGET_PARAMS;
			param[1] = (i << 8);
			param[2] = (qpti->dev_param[i].device_flags << 8);
			if (qpti->dev_param[i].device_flags & 0x10) {
				param[3] = (qpti->dev_param[i].synchronous_offset << 8) |
					qpti->dev_param[i].synchronous_period;
			} else {
				param[3] = 0;
			}
			(void) qlogicpti_mbox_command(qpti, param, 0);
		}
		/*
		 * set to zero so any traverse through ourdone
		 * doesn't start the whole process again,
		 */
		qpti->sbits = 0;
	}

	/* check to see if we're done with all adapters... */
	for (qpti = qptichain; qpti != NULL; qpti = qpti->next) {
		if (qpti->sbits)
			break;
	}

	/*
	 * if we hit the end of the chain w/o finding adapters still
	 * capability-configuring, then we're done with all adapters
	 */
	if (qpti == NULL)
		Cmnd->device->host->hostt->queuecommand = qlogicpti_queuecommand;

	return qlogicpti_queuecommand(Cmnd, done);
}
/*
 * The middle SCSI layer ensures that queuecommand never gets invoked
 * concurrently with itself or the interrupt handler (though the
 * interrupt handler may call this routine as part of
 * request-completion handling).
 *
 * "This code must fly." -davem
 */
static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = Cmnd->device->host;
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	struct Command_Entry *cmd;
	u_int out_ptr;
	int in_ptr;

	Cmnd->scsi_done = done;

	in_ptr = qpti->req_in_ptr;
	cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
	out_ptr = sbus_readw(qpti->qregs + MBOX4);
	in_ptr = NEXT_REQ_PTR(in_ptr);
	if (in_ptr == out_ptr)
		goto toss_command;

	if (qpti->send_marker) {
		marker_frob(cmd);
		qpti->send_marker = 0;
		if (NEXT_REQ_PTR(in_ptr) == out_ptr) {
			sbus_writew(in_ptr, qpti->qregs + MBOX4);
			qpti->req_in_ptr = in_ptr;
			goto toss_command;
		}
		cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
		in_ptr = NEXT_REQ_PTR(in_ptr);
	}
	cmd_frob(cmd, Cmnd, qpti);
	if ((in_ptr = load_cmd(Cmnd, cmd, qpti, in_ptr, out_ptr)) == -1)
		goto toss_command;

	update_can_queue(host, in_ptr, out_ptr);

	return 0;

toss_command:
	printk(KERN_EMERG "qlogicpti%d: request queue overflow\n",
	       qpti->qpti_id);

	/* Unfortunately, unless you use the new EH code, which
	 * we don't, the midlayer will ignore the return value,
	 * which is insane.  We pick up the pieces like this.
	 */
	Cmnd->result = DID_BUS_BUSY;
	done(Cmnd);
	return 1;
}
static int qlogicpti_return_status(struct Status_Entry *sts, int id)
{
	int host_status = DID_ERROR;

	switch (sts->completion_status) {
	case CS_COMPLETE:
		host_status = DID_OK;
		break;
	case CS_INCOMPLETE:
		if (!(sts->state_flags & SF_GOT_BUS))
			host_status = DID_NO_CONNECT;
		else if (!(sts->state_flags & SF_GOT_TARGET))
			host_status = DID_BAD_TARGET;
		else if (!(sts->state_flags & SF_SENT_CDB))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_TRANSFERRED_DATA))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_GOT_STATUS))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_GOT_SENSE))
			host_status = DID_ERROR;
		break;
	case CS_DMA_ERROR:
	case CS_TRANSPORT_ERROR:
		host_status = DID_ERROR;
		break;
	case CS_RESET_OCCURRED:
	case CS_BUS_RESET:
		host_status = DID_RESET;
		break;
	case CS_ABORTED:
		host_status = DID_ABORT;
		break;
	case CS_TIMEOUT:
		host_status = DID_TIME_OUT;
		break;
	case CS_DATA_OVERRUN:
	case CS_COMMAND_OVERRUN:
	case CS_STATUS_OVERRUN:
	case CS_BAD_MESSAGE:
	case CS_NO_MESSAGE_OUT:
	case CS_EXT_ID_FAILED:
	case CS_IDE_MSG_FAILED:
	case CS_ABORT_MSG_FAILED:
	case CS_NOP_MSG_FAILED:
	case CS_PARITY_ERROR_MSG_FAILED:
	case CS_DEVICE_RESET_MSG_FAILED:
	case CS_ID_MSG_FAILED:
	case CS_UNEXP_BUS_FREE:
		host_status = DID_ERROR;
		break;
	case CS_DATA_UNDERRUN:
		host_status = DID_OK;
		break;
	default:
		printk(KERN_EMERG "qlogicpti%d: unknown completion status 0x%04x\n",
		       id, sts->completion_status);
		host_status = DID_ERROR;
		break;
	}

	return (sts->scsi_status & STATUS_MASK) | (host_status << 16);
}
static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
{
	struct scsi_cmnd *Cmnd, *done_queue = NULL;
	struct Status_Entry *sts;
	u_int in_ptr, out_ptr;

	if (!(sbus_readw(qpti->qregs + SBUS_STAT) & SBUS_STAT_RINT))
		return NULL;

	in_ptr = sbus_readw(qpti->qregs + MBOX5);
	sbus_writew(HCCTRL_CRIRQ, qpti->qregs + HCCTRL);
	if (sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK) {
		switch (sbus_readw(qpti->qregs + MBOX0)) {
		case ASYNC_SCSI_BUS_RESET:
		case EXECUTION_TIMEOUT_RESET:
			qpti->send_marker = 1;
			break;
		case INVALID_COMMAND:
		case HOST_INTERFACE_ERROR:
		case COMMAND_ERROR:
		case COMMAND_PARAM_ERROR:
			break;
		}
		sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
	}

	/* This looks like a network driver! */
	out_ptr = qpti->res_out_ptr;
	while (out_ptr != in_ptr) {
		u_int cmd_slot;

		sts = (struct Status_Entry *) &qpti->res_cpu[out_ptr];
		out_ptr = NEXT_RES_PTR(out_ptr);

		/* We store an index in the handle, not the pointer in
		 * some form.  This avoids problems due to the fact
		 * that the handle provided is only 32-bits. -DaveM
		 */
		cmd_slot = sts->handle;
		Cmnd = qpti->cmd_slots[cmd_slot];
		qpti->cmd_slots[cmd_slot] = NULL;

		if (sts->completion_status == CS_RESET_OCCURRED ||
		    sts->completion_status == CS_ABORTED ||
		    (sts->status_flags & STF_BUS_RESET))
			qpti->send_marker = 1;

		if (sts->state_flags & SF_GOT_SENSE)
			memcpy(Cmnd->sense_buffer, sts->req_sense_data,
			       sizeof(Cmnd->sense_buffer));

		if (sts->hdr.entry_type == ENTRY_STATUS)
			Cmnd->result =
				qlogicpti_return_status(sts, qpti->qpti_id);
		else
			Cmnd->result = DID_ERROR << 16;

		if (Cmnd->use_sg) {
			sbus_unmap_sg(qpti->sdev,
				      (struct scatterlist *)Cmnd->request_buffer,
				      Cmnd->use_sg,
				      Cmnd->sc_data_direction);
		} else if (Cmnd->request_bufflen) {
			sbus_unmap_single(qpti->sdev,
					  (__u32)((unsigned long)Cmnd->SCp.ptr),
					  Cmnd->request_bufflen,
					  Cmnd->sc_data_direction);
		}
		qpti->cmd_count[Cmnd->device->id]--;
		sbus_writew(out_ptr, qpti->qregs + MBOX5);
		Cmnd->host_scribble = (unsigned char *) done_queue;
		done_queue = Cmnd;
	}
	qpti->res_out_ptr = out_ptr;

	return done_queue;
}
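/* qlogicpti_intr_handler() only harvests the response ring under the
 * host lock and strings finished commands together through
 * host_scribble; qpti_intr() below then walks that list and invokes
 * scsi_done on each entry, keeping the time spent talking to the chip
 * separate from midlayer completion work.
 */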
static irqreturn_t qpti_intr(int irq, void *dev_id)
{
	struct qlogicpti *qpti = dev_id;
	unsigned long flags;
	struct scsi_cmnd *dq;

	spin_lock_irqsave(qpti->qhost->host_lock, flags);
	dq = qlogicpti_intr_handler(qpti);

	if (dq != NULL) {
		do {
			struct scsi_cmnd *next;

			next = (struct scsi_cmnd *) dq->host_scribble;
			dq->scsi_done(dq);
			dq = next;
		} while (dq != NULL);
	}
	spin_unlock_irqrestore(qpti->qhost->host_lock, flags);

	return IRQ_HANDLED;
}
static int qlogicpti_abort(struct scsi_cmnd *Cmnd)
{
	u_short param[6];
	struct Scsi_Host *host = Cmnd->device->host;
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	int return_status = SUCCESS;
	u32 cmd_cookie;
	int i;

	printk(KERN_WARNING "qlogicpti%d: Aborting cmd for tgt[%d] lun[%d]\n",
	       qpti->qpti_id, (int)Cmnd->device->id, (int)Cmnd->device->lun);

	qlogicpti_disable_irqs(qpti);

	/* Find the 32-bit cookie we gave to the firmware for
	 * this command.
	 */
	for (i = 0; i < QLOGICPTI_REQ_QUEUE_LEN + 1; i++)
		if (qpti->cmd_slots[i] == Cmnd)
			break;
	cmd_cookie = i;

	param[0] = MBOX_ABORT;
	param[1] = (((u_short) Cmnd->device->id) << 8) | Cmnd->device->lun;
	param[2] = cmd_cookie >> 16;
	param[3] = cmd_cookie & 0xffff;
	if (qlogicpti_mbox_command(qpti, param, 0) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: scsi abort failure: %x\n",
		       qpti->qpti_id, param[0]);
		return_status = FAILED;
	}

	qlogicpti_enable_irqs(qpti);

	return return_status;
}
static int qlogicpti_reset(struct scsi_cmnd *Cmnd)
{
	u_short param[6];
	struct Scsi_Host *host = Cmnd->device->host;
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	int return_status = SUCCESS;

	printk(KERN_WARNING "qlogicpti%d: Resetting SCSI bus!\n",
	       qpti->qpti_id);

	qlogicpti_disable_irqs(qpti);

	param[0] = MBOX_BUS_RESET;
	param[1] = qpti->host_param.bus_reset_delay;
	if (qlogicpti_mbox_command(qpti, param, 0) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: scsi bus reset failure: %x\n",
		       qpti->qpti_id, param[0]);
		return_status = FAILED;
	}

	qlogicpti_enable_irqs(qpti);

	return return_status;
}
static struct scsi_host_template qpti_template = {
	.module			= THIS_MODULE,
	.name			= "qlogicpti",
	.info			= qlogicpti_info,
	.queuecommand		= qlogicpti_queuecommand_slow,
	.eh_abort_handler	= qlogicpti_abort,
	.eh_bus_reset_handler	= qlogicpti_reset,
	.can_queue		= QLOGICPTI_REQ_QUEUE_LEN,
	.this_id		= 7,
	.sg_tablesize		= QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};
static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_device_id *match)
{
	static int nqptis;
	struct sbus_dev *sdev = to_sbus_device(&dev->dev);
	struct device_node *dp = dev->node;
	struct scsi_host_template *tpnt = match->data;
	struct Scsi_Host *host;
	struct qlogicpti *qpti;
	const char *fcode;

	/* Sometimes Antares cards come up not completely
	 * setup, and we get a report of a zero IRQ.
	 */
	if (sdev->irqs[0] == 0)
		return -ENODEV;

	host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti));
	if (!host)
		return -ENOMEM;

	qpti = (struct qlogicpti *) host->hostdata;

	host->max_id = MAX_TARGETS;
	qpti->qhost = host;
	qpti->sdev = sdev;
	qpti->qpti_id = nqptis;
	qpti->prom_node = sdev->prom_node;
	strcpy(qpti->prom_name, sdev->ofdev.node->name);
	qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp");

	if (qpti_map_regs(qpti) < 0)
		goto fail_unlink;

	if (qpti_register_irq(qpti) < 0)
		goto fail_unmap_regs;

	qpti_get_scsi_id(qpti);
	qpti_get_bursts(qpti);
	qpti_get_clock(qpti);

	/* Clear out scsi_cmnd array. */
	memset(qpti->cmd_slots, 0, sizeof(qpti->cmd_slots));

	if (qpti_map_queues(qpti) < 0)
		goto fail_free_irq;

	/* Load the firmware. */
	if (qlogicpti_load_firmware(qpti))
		goto fail_unmap_queues;
	if (qpti->is_pti) {
		/* Check the PTI status reg. */
		if (qlogicpti_verify_tmon(qpti))
			goto fail_unmap_queues;
	}

	/* Reset the ISP and init res/req queues. */
	if (qlogicpti_reset_hardware(host))
		goto fail_unmap_queues;

	printk("(Firmware v%d.%d.%d)", qpti->fware_majrev,
	       qpti->fware_minrev, qpti->fware_micrev);

	fcode = of_get_property(dp, "isp-fcode", NULL);
	if (fcode && fcode[0])
		printk("(FCode %s)", fcode);
	if (of_find_property(dp, "differential", NULL) != NULL)
		qpti->differential = 1;

	printk("\nqlogicpti%d: [%s Wide, using %s interface]\n",
	       qpti->qpti_id,
	       (qpti->ultra ? "Ultra" : "Fast"),
	       (qpti->differential ? "differential" : "single ended"));

	if (scsi_add_host(host, &dev->dev)) {
		printk("qlogicpti%d: Failed scsi_add_host\n", qpti->qpti_id);
		goto fail_unmap_queues;
	}

	dev_set_drvdata(&sdev->ofdev.dev, qpti);

	qpti_chain_add(qpti);

	scsi_scan_host(host);

	nqptis++;
	return 0;

fail_unmap_queues:
#define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
	sbus_free_consistent(qpti->sdev,
			     QSIZE(RES_QUEUE_LEN),
			     qpti->res_cpu, qpti->res_dvma);
	sbus_free_consistent(qpti->sdev,
			     QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
			     qpti->req_cpu, qpti->req_dvma);
#undef QSIZE

fail_unmap_regs:
	sbus_iounmap(qpti->qregs,
		     qpti->sdev->reg_addrs[0].reg_size);
	if (qpti->is_pti)
		sbus_iounmap(qpti->sreg, sizeof(unsigned char));

fail_free_irq:
	free_irq(qpti->irq, qpti);

fail_unlink:
	scsi_host_put(host);

	return -ENODEV;
}
static int __devexit qpti_sbus_remove(struct of_device *dev)
{
	struct qlogicpti *qpti = dev_get_drvdata(&dev->dev);

	qpti_chain_del(qpti);

	scsi_remove_host(qpti->qhost);

	/* Shut up the card. */
	sbus_writew(0, qpti->qregs + SBUS_CTRL);

	/* Free IRQ handler and unmap Qlogic,ISP and PTI status regs. */
	free_irq(qpti->irq, qpti);

#define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
	sbus_free_consistent(qpti->sdev,
			     QSIZE(RES_QUEUE_LEN),
			     qpti->res_cpu, qpti->res_dvma);
	sbus_free_consistent(qpti->sdev,
			     QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
			     qpti->req_cpu, qpti->req_dvma);
#undef QSIZE

	sbus_iounmap(qpti->qregs, qpti->sdev->reg_addrs[0].reg_size);
	if (qpti->is_pti)
		sbus_iounmap(qpti->sreg, sizeof(unsigned char));

	scsi_host_put(qpti->qhost);

	return 0;
}
static struct of_device_id qpti_match[] = {
	{
		.name = "ptisp",
		.data = &qpti_template,
	},
	{
		.name = "PTI,ptisp",
		.data = &qpti_template,
	},
	{
		.name = "QLGC,isp",
		.data = &qpti_template,
	},
	{
		.name = "SUNW,isp",
		.data = &qpti_template,
	},
	{},
};
MODULE_DEVICE_TABLE(of, qpti_match);
static struct of_platform_driver qpti_sbus_driver = {
	.name		= "qpti",
	.match_table	= qpti_match,
	.probe		= qpti_sbus_probe,
	.remove		= __devexit_p(qpti_sbus_remove),
};
static int __init qpti_init(void)
{
	return of_register_driver(&qpti_sbus_driver, &sbus_bus_type);
}

static void __exit qpti_exit(void)
{
	of_unregister_driver(&qpti_sbus_driver);
}
MODULE_DESCRIPTION("QlogicISP SBUS driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.0");

module_init(qpti_init);
module_exit(qpti_exit);