/* qlogicpti.c: Performance Technologies QlogicISP sbus card driver.
 *
 * Copyright (C) 1996, 2006 David S. Miller (davem@davemloft.net)
 *
 * A lot of this driver was directly stolen from Erik H. Moe's PCI
 * Qlogic ISP driver.  Mucho kudos to him for this code.
 *
 * An even bigger kudos to John Grana at Performance Technologies
 * for providing me with the hardware to write this driver, you rule.
 *
 * May, 2, 1997: Added support for QLGC,isp --jj
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/jiffies.h>

#include <asm/byteorder.h>

#include "qlogicpti.h"
#include <asm/sbus.h>
#include <asm/dma.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#define MAX_TARGETS	16
#define MAX_LUNS	8	/* 32 for 1.31 F/W */

#define DEFAULT_LOOP_COUNT	10000

#include "qlogicpti_asm.c"
static struct qlogicpti *qptichain = NULL;
static DEFINE_SPINLOCK(qptichain_lock);
static int nqptis = 0;
#define PACKB(a, b)	(((a)<<4)|(b))
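/* PACKB() packs two nibble-sized register counts into one byte: the high
 * nibble is how many mailbox registers a command writes on the way in, the
 * low nibble how many it reads back.  For example, PACKB(2, 3) == 0x23 means
 * two input registers (MBOX0-MBOX1) and three result registers (MBOX0-MBOX2),
 * which is exactly how qlogicpti_mbox_command() decodes mbox_param[] below
 * with ">> 4" and "& 0xf".
 */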
static const u_char mbox_param[] = {
	PACKB(1, 1),	/* MBOX_NO_OP */
	PACKB(5, 5),	/* MBOX_LOAD_RAM */
	PACKB(2, 0),	/* MBOX_EXEC_FIRMWARE */
	PACKB(5, 5),	/* MBOX_DUMP_RAM */
	PACKB(3, 3),	/* MBOX_WRITE_RAM_WORD */
	PACKB(2, 3),	/* MBOX_READ_RAM_WORD */
	PACKB(6, 6),	/* MBOX_MAILBOX_REG_TEST */
	PACKB(2, 3),	/* MBOX_VERIFY_CHECKSUM */
	PACKB(1, 3),	/* MBOX_ABOUT_FIRMWARE */
	PACKB(0, 0),	/* 0x0009 */
	PACKB(0, 0),	/* 0x000a */
	PACKB(0, 0),	/* 0x000b */
	PACKB(0, 0),	/* 0x000c */
	PACKB(0, 0),	/* 0x000d */
	PACKB(1, 2),	/* MBOX_CHECK_FIRMWARE */
	PACKB(0, 0),	/* 0x000f */
	PACKB(5, 5),	/* MBOX_INIT_REQ_QUEUE */
	PACKB(6, 6),	/* MBOX_INIT_RES_QUEUE */
	PACKB(4, 4),	/* MBOX_EXECUTE_IOCB */
	PACKB(2, 2),	/* MBOX_WAKE_UP */
	PACKB(1, 6),	/* MBOX_STOP_FIRMWARE */
	PACKB(4, 4),	/* MBOX_ABORT */
	PACKB(2, 2),	/* MBOX_ABORT_DEVICE */
	PACKB(3, 3),	/* MBOX_ABORT_TARGET */
	PACKB(2, 2),	/* MBOX_BUS_RESET */
	PACKB(2, 3),	/* MBOX_STOP_QUEUE */
	PACKB(2, 3),	/* MBOX_START_QUEUE */
	PACKB(2, 3),	/* MBOX_SINGLE_STEP_QUEUE */
	PACKB(2, 3),	/* MBOX_ABORT_QUEUE */
	PACKB(2, 4),	/* MBOX_GET_DEV_QUEUE_STATUS */
	PACKB(0, 0),	/* 0x001e */
	PACKB(1, 3),	/* MBOX_GET_FIRMWARE_STATUS */
	PACKB(1, 2),	/* MBOX_GET_INIT_SCSI_ID */
	PACKB(1, 2),	/* MBOX_GET_SELECT_TIMEOUT */
	PACKB(1, 3),	/* MBOX_GET_RETRY_COUNT */
	PACKB(1, 2),	/* MBOX_GET_TAG_AGE_LIMIT */
	PACKB(1, 2),	/* MBOX_GET_CLOCK_RATE */
	PACKB(1, 2),	/* MBOX_GET_ACT_NEG_STATE */
	PACKB(1, 2),	/* MBOX_GET_ASYNC_DATA_SETUP_TIME */
	PACKB(1, 3),	/* MBOX_GET_SBUS_PARAMS */
	PACKB(2, 4),	/* MBOX_GET_TARGET_PARAMS */
	PACKB(2, 4),	/* MBOX_GET_DEV_QUEUE_PARAMS */
	PACKB(0, 0),	/* 0x002a */
	PACKB(0, 0),	/* 0x002b */
	PACKB(0, 0),	/* 0x002c */
	PACKB(0, 0),	/* 0x002d */
	PACKB(0, 0),	/* 0x002e */
	PACKB(0, 0),	/* 0x002f */
	PACKB(2, 2),	/* MBOX_SET_INIT_SCSI_ID */
	PACKB(2, 2),	/* MBOX_SET_SELECT_TIMEOUT */
	PACKB(3, 3),	/* MBOX_SET_RETRY_COUNT */
	PACKB(2, 2),	/* MBOX_SET_TAG_AGE_LIMIT */
	PACKB(2, 2),	/* MBOX_SET_CLOCK_RATE */
	PACKB(2, 2),	/* MBOX_SET_ACTIVE_NEG_STATE */
	PACKB(2, 2),	/* MBOX_SET_ASYNC_DATA_SETUP_TIME */
	PACKB(3, 3),	/* MBOX_SET_SBUS_CONTROL_PARAMS */
	PACKB(4, 4),	/* MBOX_SET_TARGET_PARAMS */
	PACKB(4, 4),	/* MBOX_SET_DEV_QUEUE_PARAMS */
	PACKB(0, 0),	/* 0x003a */
	PACKB(0, 0),	/* 0x003b */
	PACKB(0, 0),	/* 0x003c */
	PACKB(0, 0),	/* 0x003d */
	PACKB(0, 0),	/* 0x003e */
	PACKB(0, 0),	/* 0x003f */
	PACKB(0, 0),	/* 0x0040 */
	PACKB(0, 0),	/* 0x0041 */
	PACKB(0, 0)	/* 0x0042 */
};

#define MAX_MBOX_COMMAND	ARRAY_SIZE(mbox_param)
/* queue lengths _must_ be powers of two: */
#define QUEUE_DEPTH(in, out, ql)	((in - out) & (ql))
#define REQ_QUEUE_DEPTH(in, out)	QUEUE_DEPTH(in, out, 		     \
						    QLOGICPTI_REQ_QUEUE_LEN)
#define RES_QUEUE_DEPTH(in, out)	QUEUE_DEPTH(in, out, RES_QUEUE_LEN)
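/* Ring arithmetic example: the in/out indices only move forward modulo the
 * ring size, so the subtraction-and-mask works across wraparound.  E.g. with
 * a mask of 255 (a 256-entry ring), in == 3 and out == 250 gives
 * QUEUE_DEPTH(3, 250, 255) == ((3 - 250) & 255) == 9 entries in flight.
 */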
static inline void qlogicpti_enable_irqs(struct qlogicpti *qpti)
{
	sbus_writew(SBUS_CTRL_ERIRQ | SBUS_CTRL_GENAB,
		    qpti->qregs + SBUS_CTRL);
}

static inline void qlogicpti_disable_irqs(struct qlogicpti *qpti)
{
	sbus_writew(0, qpti->qregs + SBUS_CTRL);
}
static inline void set_sbus_cfg1(struct qlogicpti *qpti)
{
	u16 val;
	u8 bursts = qpti->bursts;

#if 0	/* It appears that at least PTI cards do not support
	 * 64-byte bursts and that setting the B64 bit actually
	 * is a nop and the chip ends up using the smallest burst
	 * size.
	 */
	if (sbus_can_burst64(qpti->sdev) && (bursts & DMA_BURST64)) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64);
	} else
#endif
	if (bursts & DMA_BURST32) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B32);
	} else if (bursts & DMA_BURST16) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B16);
	} else if (bursts & DMA_BURST8) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B8);
	} else {
		val = 0; /* No sbus bursts for you... */
	}
	sbus_writew(val, qpti->qregs + SBUS_CFG1);
}
static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int force)
{
	int loop_count;
	u16 tmp;

	if (mbox_param[param[0]] == 0)
		return 1;

	/* Set SBUS semaphore. */
	tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
	tmp |= SBUS_SEMAPHORE_LCK;
	sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);

	/* Wait for host IRQ bit to clear. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_HIRQ)) {
		barrier();
		cpu_relax();
	}
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti: mbox_command loop timeout #1\n");

	/* Write mailbox command registers. */
	switch (mbox_param[param[0]] >> 4) {
	case 6: sbus_writew(param[5], qpti->qregs + MBOX5);
	case 5: sbus_writew(param[4], qpti->qregs + MBOX4);
	case 4: sbus_writew(param[3], qpti->qregs + MBOX3);
	case 3: sbus_writew(param[2], qpti->qregs + MBOX2);
	case 2: sbus_writew(param[1], qpti->qregs + MBOX1);
	case 1: sbus_writew(param[0], qpti->qregs + MBOX0);
	}

	/* Clear RISC interrupt. */
	tmp = sbus_readw(qpti->qregs + HCCTRL);
	tmp |= HCCTRL_CRIRQ;
	sbus_writew(tmp, qpti->qregs + HCCTRL);

	/* Clear SBUS semaphore. */
	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);

	/* Set HOST interrupt. */
	tmp = sbus_readw(qpti->qregs + HCCTRL);
	tmp |= HCCTRL_SHIRQ;
	sbus_writew(tmp, qpti->qregs + HCCTRL);

	/* Wait for HOST interrupt clears. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count &&
	       (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_CRIRQ))
		udelay(20);
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti: mbox_command[%04x] loop timeout #2\n",
		       param[0]);

	/* Wait for SBUS semaphore to get set. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count &&
	       !(sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK)) {
		udelay(20);

		/* Workaround for some buggy chips. */
		if (sbus_readw(qpti->qregs + MBOX0) & 0x4000)
			break;
	}
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti: mbox_command[%04x] loop timeout #3\n",
		       param[0]);

	/* Wait for MBOX busy condition to go away. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && (sbus_readw(qpti->qregs + MBOX0) == 0x04))
		udelay(20);
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti: mbox_command[%04x] loop timeout #4\n",
		       param[0]);

	/* Read back output parameters. */
	switch (mbox_param[param[0]] & 0xf) {
	case 6: param[5] = sbus_readw(qpti->qregs + MBOX5);
	case 5: param[4] = sbus_readw(qpti->qregs + MBOX4);
	case 4: param[3] = sbus_readw(qpti->qregs + MBOX3);
	case 3: param[2] = sbus_readw(qpti->qregs + MBOX2);
	case 2: param[1] = sbus_readw(qpti->qregs + MBOX1);
	case 1: param[0] = sbus_readw(qpti->qregs + MBOX0);
	}

	/* Clear RISC interrupt. */
	tmp = sbus_readw(qpti->qregs + HCCTRL);
	tmp |= HCCTRL_CRIRQ;
	sbus_writew(tmp, qpti->qregs + HCCTRL);

	/* Release SBUS semaphore. */
	tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
	tmp &= ~(SBUS_SEMAPHORE_LCK);
	sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);

	return 0;
}
static inline void qlogicpti_set_hostdev_defaults(struct qlogicpti *qpti)
{
	int i;

	qpti->host_param.initiator_scsi_id = qpti->scsi_id;
	qpti->host_param.bus_reset_delay = 3;
	qpti->host_param.retry_count = 0;
	qpti->host_param.retry_delay = 5;
	qpti->host_param.async_data_setup_time = 3;
	qpti->host_param.req_ack_active_negation = 1;
	qpti->host_param.data_line_active_negation = 1;
	qpti->host_param.data_dma_burst_enable = 1;
	qpti->host_param.command_dma_burst_enable = 1;
	qpti->host_param.tag_aging = 8;
	qpti->host_param.selection_timeout = 250;
	qpti->host_param.max_queue_depth = 256;

	for(i = 0; i < MAX_TARGETS; i++) {
		/*
		 * disconnect, parity, arq, reneg on reset, and, oddly enough
		 * tags...the midlayer's notion of tagged support has to match
		 * our device settings, and since we base whether we enable a
		 * tag on a per-cmnd basis upon what the midlayer sez, we
		 * actually enable the capability here.
		 */
		qpti->dev_param[i].device_flags = 0xcd;
		qpti->dev_param[i].execution_throttle = 16;
		if (qpti->ultra) {
			qpti->dev_param[i].synchronous_period = 12;
			qpti->dev_param[i].synchronous_offset = 8;
		} else {
			qpti->dev_param[i].synchronous_period = 25;
			qpti->dev_param[i].synchronous_offset = 12;
		}
		qpti->dev_param[i].device_enable = 1;
	}

	/* this is very important to set! */
	qpti->sbits = 1 << qpti->scsi_id;
}
static int qlogicpti_reset_hardware(struct Scsi_Host *host)
{
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	u_short param[6];
	unsigned short risc_code_addr;
	int loop_count, i;
	unsigned long flags;

	risc_code_addr = 0x1000;	/* all load addresses are at 0x1000 */

	spin_lock_irqsave(host->host_lock, flags);

	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);

	/* Only reset the scsi bus if it is not free. */
	if (sbus_readw(qpti->qregs + CPU_PCTRL) & CPU_PCTRL_BSY) {
		sbus_writew(CPU_ORIDE_RMOD, qpti->qregs + CPU_ORIDE);
		sbus_writew(CPU_CMD_BRESET, qpti->qregs + CPU_CMD);
	}

	sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);

	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && ((sbus_readw(qpti->qregs + MBOX0) & 0xff) == 0x04))
		udelay(20);
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti: reset_hardware loop timeout\n");

	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
	qlogicpti_enable_irqs(qpti);

	if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
		qpti->ultra = 1;
		sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
			    qpti->qregs + RISC_MTREG);
	} else {
		qpti->ultra = 0;
		sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
			    qpti->qregs + RISC_MTREG);
	}

	/* reset adapter and per-device default values. */
	/* do it after finding out whether we're ultra mode capable */
	qlogicpti_set_hostdev_defaults(qpti);

	/* Release the RISC processor. */
	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* Get RISC to start executing the firmware code. */
	param[0] = MBOX_EXEC_FIRMWARE;
	param[1] = risc_code_addr;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot execute ISP firmware.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Set initiator scsi ID. */
	param[0] = MBOX_SET_INIT_SCSI_ID;
	param[1] = qpti->host_param.initiator_scsi_id;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot set initiator SCSI ID.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Initialize state of the queues, both hw and sw. */
	qpti->req_in_ptr = qpti->res_out_ptr = 0;

	param[0] = MBOX_INIT_RES_QUEUE;
	param[1] = RES_QUEUE_LEN + 1;
	param[2] = (u_short) (qpti->res_dvma >> 16);
	param[3] = (u_short) (qpti->res_dvma & 0xffff);
	param[4] = param[5] = 0;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot init response queue.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	param[0] = MBOX_INIT_REQ_QUEUE;
	param[1] = QLOGICPTI_REQ_QUEUE_LEN + 1;
	param[2] = (u_short) (qpti->req_dvma >> 16);
	param[3] = (u_short) (qpti->req_dvma & 0xffff);
	param[4] = param[5] = 0;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot init request queue.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	param[0] = MBOX_SET_RETRY_COUNT;
	param[1] = qpti->host_param.retry_count;
	param[2] = qpti->host_param.retry_delay;
	qlogicpti_mbox_command(qpti, param, 0);

	param[0] = MBOX_SET_TAG_AGE_LIMIT;
	param[1] = qpti->host_param.tag_aging;
	qlogicpti_mbox_command(qpti, param, 0);

	for (i = 0; i < MAX_TARGETS; i++) {
		param[0] = MBOX_GET_DEV_QUEUE_PARAMS;
		param[1] = (i << 8);
		qlogicpti_mbox_command(qpti, param, 0);
	}

	param[0] = MBOX_GET_FIRMWARE_STATUS;
	qlogicpti_mbox_command(qpti, param, 0);

	param[0] = MBOX_SET_SELECT_TIMEOUT;
	param[1] = qpti->host_param.selection_timeout;
	qlogicpti_mbox_command(qpti, param, 0);

	for (i = 0; i < MAX_TARGETS; i++) {
		param[0] = MBOX_SET_TARGET_PARAMS;
		param[1] = (i << 8);
		param[2] = (qpti->dev_param[i].device_flags << 8);
		/*
		 * Since we're now loading 1.31 f/w, force narrow/async.
		 */
		param[3] = 0;	/* no offset, we do not have sync mode yet */
		qlogicpti_mbox_command(qpti, param, 0);
	}

	/*
	 * Always (sigh) do an initial bus reset (kicks f/w).
	 */
	param[0] = MBOX_BUS_RESET;
	param[1] = qpti->host_param.bus_reset_delay;
	qlogicpti_mbox_command(qpti, param, 0);
	qpti->send_marker = 1;

	spin_unlock_irqrestore(host->host_lock, flags);
	return 0;
}
#define PTI_RESET_LIMIT 400
static int __init qlogicpti_load_firmware(struct qlogicpti *qpti)
{
	struct Scsi_Host *host = qpti->qhost;
	unsigned short csum = 0;
	unsigned short param[6];
	unsigned short *risc_code, risc_code_addr, risc_code_length;
	unsigned long flags;
	int i, timeout;

	risc_code = &sbus_risc_code01[0];
	risc_code_addr = 0x1000;	/* all f/w modules load at 0x1000 */
	risc_code_length = sbus_risc_code_length01;

	spin_lock_irqsave(host->host_lock, flags);

	/* Verify the checksum twice, one before loading it, and once
	 * afterwards via the mailbox commands.
	 */
	for (i = 0; i < risc_code_length; i++)
		csum += risc_code[i];
	if (csum) {
		spin_unlock_irqrestore(host->host_lock, flags);
		printk(KERN_EMERG "qlogicpti%d: Aieee, firmware checksum failed!",
		       qpti->qpti_id);
		return 1;
	}

	sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);
	timeout = PTI_RESET_LIMIT;
	while (--timeout && (sbus_readw(qpti->qregs + SBUS_CTRL) & SBUS_CTRL_RESET))
		udelay(20);
	if (!timeout) {
		spin_unlock_irqrestore(host->host_lock, flags);
		printk(KERN_EMERG "qlogicpti%d: Cannot reset the ISP.", qpti->qpti_id);
		return 1;
	}

	sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);

	sbus_writew((SBUS_CTRL_GENAB | SBUS_CTRL_ERIRQ), qpti->qregs + SBUS_CTRL);

	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);

	if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
		qpti->ultra = 1;
		sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
			    qpti->qregs + RISC_MTREG);
	} else {
		qpti->ultra = 0;
		sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
			    qpti->qregs + RISC_MTREG);
	}

	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* Pin lines are only stable while RISC is paused. */
	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
	if (sbus_readw(qpti->qregs + CPU_PDIFF) & CPU_PDIFF_MODE)
		qpti->differential = 1;
	else
		qpti->differential = 0;
	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* This shouldn't be necessary- we've reset things so we should be
	   running from the ROM now.. */

	param[0] = MBOX_STOP_FIRMWARE;
	param[1] = param[2] = param[3] = param[4] = param[5] = 0;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot stop firmware for reload.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	for (i = 0; i < risc_code_length; i++) {
		param[0] = MBOX_WRITE_RAM_WORD;
		param[1] = risc_code_addr + i;
		param[2] = risc_code[i];
		if (qlogicpti_mbox_command(qpti, param, 1) ||
		    param[0] != MBOX_COMMAND_COMPLETE) {
			printk("qlogicpti%d: Firmware dload failed, I'm bolixed!\n",
			       qpti->qpti_id);
			spin_unlock_irqrestore(host->host_lock, flags);
			return 1;
		}
	}

	/* Reset the ISP again. */
	sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);

	qlogicpti_enable_irqs(qpti);
	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* Ask ISP to verify the checksum of the new code. */
	param[0] = MBOX_VERIFY_CHECKSUM;
	param[1] = risc_code_addr;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: New firmware csum failure!\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Start using newly downloaded firmware. */
	param[0] = MBOX_EXEC_FIRMWARE;
	param[1] = risc_code_addr;
	qlogicpti_mbox_command(qpti, param, 1);

	param[0] = MBOX_ABOUT_FIRMWARE;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: AboutFirmware cmd fails.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Snag the major and minor revisions from the result. */
	qpti->fware_majrev = param[1];
	qpti->fware_minrev = param[2];
	qpti->fware_micrev = param[3];

	/* Set the clock rate */
	param[0] = MBOX_SET_CLOCK_RATE;
	param[1] = qpti->clock;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: could not set clock rate.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	if (qpti->is_pti != 0) {
		/* Load scsi initiator ID and interrupt level into sbus static ram. */
		param[0] = MBOX_WRITE_RAM_WORD;
		param[2] = (unsigned short) qpti->scsi_id;
		qlogicpti_mbox_command(qpti, param, 1);

		param[0] = MBOX_WRITE_RAM_WORD;
		param[2] = (unsigned short) 3;
		qlogicpti_mbox_command(qpti, param, 1);
	}

	spin_unlock_irqrestore(host->host_lock, flags);
	return 0;
}
static int qlogicpti_verify_tmon(struct qlogicpti *qpti)
{
	int curstat = sbus_readb(qpti->sreg);

	if (!(curstat & SREG_FUSE) && (qpti->swsreg & SREG_FUSE))
		printk("qlogicpti%d: Fuse returned to normal state.\n", qpti->qpti_id);
	if (!(curstat & SREG_TPOWER) && (qpti->swsreg & SREG_TPOWER))
		printk("qlogicpti%d: termpwr back to normal state.\n", qpti->qpti_id);
	if (curstat != qpti->swsreg) {
		int error = 0;

		if (curstat & SREG_FUSE) {
			error++;
			printk("qlogicpti%d: Fuse is open!\n", qpti->qpti_id);
		}
		if (curstat & SREG_TPOWER) {
			error++;
			printk("qlogicpti%d: termpwr failure\n", qpti->qpti_id);
		}
		if (qpti->differential &&
		    (curstat & SREG_DSENSE) != SREG_DSENSE) {
			error++;
			printk("qlogicpti%d: You have a single ended device on a "
			       "differential bus!  Please fix!\n", qpti->qpti_id);
		}
		qpti->swsreg = curstat;
		return error;
	}
	return 0;
}
static irqreturn_t qpti_intr(int irq, void *dev_id, struct pt_regs *regs);
static void __init qpti_chain_add(struct qlogicpti *qpti)
{
	spin_lock_irq(&qptichain_lock);
	if (qptichain != NULL) {
		struct qlogicpti *qlink = qptichain;

		while (qlink->next)
			qlink = qlink->next;
		qlink->next = qpti;
	} else {
		qptichain = qpti;
	}
	qpti->next = NULL;
	spin_unlock_irq(&qptichain_lock);
}
static void __init qpti_chain_del(struct qlogicpti *qpti)
{
	spin_lock_irq(&qptichain_lock);
	if (qptichain == qpti) {
		qptichain = qpti->next;
	} else {
		struct qlogicpti *qlink = qptichain;

		while (qlink->next != qpti)
			qlink = qlink->next;
		qlink->next = qpti->next;
	}
	qpti->next = NULL;
	spin_unlock_irq(&qptichain_lock);
}
static int __init qpti_map_regs(struct qlogicpti *qpti)
{
	struct sbus_dev *sdev = qpti->sdev;

	qpti->qregs = sbus_ioremap(&sdev->resource[0], 0,
				   sdev->reg_addrs[0].reg_size,
				   "PTI Qlogic/ISP");
	if (!qpti->qregs) {
		printk("PTI: Qlogic/ISP registers are unmappable\n");
		return -1;
	}

	qpti->sreg = sbus_ioremap(&sdev->resource[0], (16 * 4096),
				  sizeof(unsigned char),
				  "PTI Qlogic/ISP statreg");
	if (!qpti->sreg) {
		printk("PTI: Qlogic/ISP status register is unmappable\n");
		return -1;
	}
	return 0;
}
static int __init qpti_register_irq(struct qlogicpti *qpti)
{
	struct sbus_dev *sdev = qpti->sdev;

	qpti->qhost->irq = qpti->irq = sdev->irqs[0];

	/* We used to try various overly-clever things to
	 * reduce the interrupt processing overhead on
	 * sun4c/sun4m when multiple PTI's shared the
	 * same IRQ.  It was too complex and messy to
	 * maintain sanely, so it is gone.
	 */
	if (request_irq(qpti->irq, qpti_intr,
			IRQF_SHARED, "Qlogic/PTI", qpti))
		goto fail;

	printk("qpti%d: IRQ %d ", qpti->qpti_id, qpti->irq);

	return 0;

fail:
	printk("qpti%d: Cannot acquire irq line\n", qpti->qpti_id);
	return -1;
}
static void __init qpti_get_scsi_id(struct qlogicpti *qpti)
{
	qpti->scsi_id = prom_getintdefault(qpti->prom_node,
					   "initiator-id", -1);
	if (qpti->scsi_id == -1)
		qpti->scsi_id = prom_getintdefault(qpti->prom_node,
						   "scsi-initiator-id", -1);
	if (qpti->scsi_id == -1)
		qpti->scsi_id =
			prom_getintdefault(qpti->sdev->bus->prom_node,
					   "scsi-initiator-id", 7);
	qpti->qhost->this_id = qpti->scsi_id;
	qpti->qhost->max_sectors = 64;

	printk("SCSI ID %d ", qpti->scsi_id);
}
static void qpti_get_bursts(struct qlogicpti *qpti)
{
	struct sbus_dev *sdev = qpti->sdev;
	u8 bursts, bmask;

	bursts = prom_getintdefault(qpti->prom_node, "burst-sizes", 0xff);
	bmask = prom_getintdefault(sdev->bus->prom_node,
				   "burst-sizes", 0xff);
	if (bmask != 0xff)
		bursts &= bmask;
	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	qpti->bursts = bursts;
}
static void qpti_get_clock(struct qlogicpti *qpti)
{
	unsigned int cfreq;

	/* Check for what the clock input to this card is.
	 * Default to 40MHz.
	 */
	cfreq = prom_getintdefault(qpti->prom_node, "clock-frequency", 40000000);
	qpti->clock = (cfreq + 500000) / 1000000;
	if (qpti->clock == 0) /* bullshit */
		qpti->clock = 40;
}
/* The request and response queues must each be aligned
 * on a page boundary.
 */
static int __init qpti_map_queues(struct qlogicpti *qpti)
{
	struct sbus_dev *sdev = qpti->sdev;

#define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
	qpti->res_cpu = sbus_alloc_consistent(sdev,
					      QSIZE(RES_QUEUE_LEN),
					      &qpti->res_dvma);
	if (qpti->res_cpu == NULL ||
	    qpti->res_dvma == 0) {
		printk("QPTI: Cannot map response queue.\n");
		return -1;
	}

	qpti->req_cpu = sbus_alloc_consistent(sdev,
					      QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
					      &qpti->req_dvma);
	if (qpti->req_cpu == NULL ||
	    qpti->req_dvma == 0) {
		sbus_free_consistent(sdev, QSIZE(RES_QUEUE_LEN),
				     qpti->res_cpu, qpti->res_dvma);
		printk("QPTI: Cannot map request queue.\n");
		return -1;
	}
	memset(qpti->res_cpu, 0, QSIZE(RES_QUEUE_LEN));
	memset(qpti->req_cpu, 0, QSIZE(QLOGICPTI_REQ_QUEUE_LEN));
	return 0;
}
const char *qlogicpti_info(struct Scsi_Host *host)
{
	static char buf[80];
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;

	sprintf(buf, "PTI Qlogic,ISP SBUS SCSI irq %d regs at %p",
		qpti->qhost->irq, qpti->qregs);
	return buf;
}
/* I am a certified frobtronicist. */
static inline void marker_frob(struct Command_Entry *cmd)
{
	struct Marker_Entry *marker = (struct Marker_Entry *) cmd;

	memset(marker, 0, sizeof(struct Marker_Entry));
	marker->hdr.entry_cnt = 1;
	marker->hdr.entry_type = ENTRY_MARKER;
	marker->modifier = SYNC_ALL;
}
static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
			    struct qlogicpti *qpti)
{
	memset(cmd, 0, sizeof(struct Command_Entry));
	cmd->hdr.entry_cnt = 1;
	cmd->hdr.entry_type = ENTRY_COMMAND;
	cmd->target_id = Cmnd->device->id;
	cmd->target_lun = Cmnd->device->lun;
	cmd->cdb_length = Cmnd->cmd_len;
	cmd->control_flags = 0;
	if (Cmnd->device->tagged_supported) {
		if (qpti->cmd_count[Cmnd->device->id] == 0)
			qpti->tag_ages[Cmnd->device->id] = jiffies;
		if (time_after(jiffies, qpti->tag_ages[Cmnd->device->id] + (5*HZ))) {
			cmd->control_flags = CFLAG_ORDERED_TAG;
			qpti->tag_ages[Cmnd->device->id] = jiffies;
		} else
			cmd->control_flags = CFLAG_SIMPLE_TAG;
	}
	if ((Cmnd->cmnd[0] == WRITE_6) ||
	    (Cmnd->cmnd[0] == WRITE_10) ||
	    (Cmnd->cmnd[0] == WRITE_12))
		cmd->control_flags |= CFLAG_WRITE;
	else
		cmd->control_flags |= CFLAG_READ;
	memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
}
/* Do it to it baby. */
static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
			   struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr)
{
	struct dataseg *ds;
	struct scatterlist *sg;
	int i, n;

	if (Cmnd->use_sg) {
		int sg_count;

		sg = (struct scatterlist *) Cmnd->request_buffer;
		sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction);

		ds = cmd->dataseg;
		cmd->segment_cnt = sg_count;

		/* Fill in first four sg entries: */
		n = sg_count;
		if (n > 4)
			n = 4;
		for (i = 0; i < n; i++, sg++) {
			ds[i].d_base = sg_dma_address(sg);
			ds[i].d_count = sg_dma_len(sg);
		}
		sg_count -= 4;

		/* Spill the remaining segments into continuation entries. */
		while (sg_count > 0) {
			struct Continuation_Entry *cont;

			++cmd->hdr.entry_cnt;
			cont = (struct Continuation_Entry *) &qpti->req_cpu[in_ptr];
			in_ptr = NEXT_REQ_PTR(in_ptr);
			if (in_ptr == out_ptr)
				return -1;

			cont->hdr.entry_type = ENTRY_CONTINUATION;
			cont->hdr.entry_cnt = 0;
			cont->hdr.sys_def_1 = 0;

			ds = cont->dataseg;
			n = sg_count;
			if (n > 7)
				n = 7;
			for (i = 0; i < n; i++, sg++) {
				ds[i].d_base = sg_dma_address(sg);
				ds[i].d_count = sg_dma_len(sg);
			}
			sg_count -= n;
		}
	} else if (Cmnd->request_bufflen) {
		Cmnd->SCp.ptr = (char *)(unsigned long)
			sbus_map_single(qpti->sdev,
					Cmnd->request_buffer,
					Cmnd->request_bufflen,
					Cmnd->sc_data_direction);

		cmd->dataseg[0].d_base = (u32) ((unsigned long)Cmnd->SCp.ptr);
		cmd->dataseg[0].d_count = Cmnd->request_bufflen;
		cmd->segment_cnt = 1;
	} else {
		cmd->dataseg[0].d_base = 0;
		cmd->dataseg[0].d_count = 0;
		cmd->segment_cnt = 1; /* Shouldn't this be 0? */
	}

	/* Committed, record Scsi_Cmd so we can find it later. */
	cmd->handle = in_ptr;
	qpti->cmd_slots[in_ptr] = Cmnd;

	qpti->cmd_count[Cmnd->device->id]++;
	sbus_writew(in_ptr, qpti->qregs + MBOX4);
	qpti->req_in_ptr = in_ptr;

	return in_ptr;
}
static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int out_ptr)
{
	/* Temporary workaround until bug is found and fixed (one bug has been found
	   already, but fixing it makes things even worse) -jj */
	int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64;

	host->can_queue = host->host_busy + num_free;
	host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
}
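/* Worked example of the accounting above: if the request ring currently holds
 * REQ_QUEUE_DEPTH(in_ptr, out_ptr) == 100 entries, num_free becomes
 * QLOGICPTI_REQ_QUEUE_LEN - 100 - 64 (the 64 being the workaround margin),
 * and can_queue is bumped to the commands the midlayer already has in flight
 * (host->host_busy) plus that remaining headroom.
 */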
static unsigned int scsi_rbuf_get(struct scsi_cmnd *cmd, unsigned char **buf_out)
{
	unsigned char *buf;
	unsigned int buflen;

	if (cmd->use_sg) {
		struct scatterlist *sg;

		sg = (struct scatterlist *) cmd->request_buffer;
		buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
		buflen = sg->length;
	} else {
		buf = cmd->request_buffer;
		buflen = cmd->request_bufflen;
	}

	*buf_out = buf;
	return buflen;
}
static void scsi_rbuf_put(struct scsi_cmnd *cmd, unsigned char *buf)
{
	if (cmd->use_sg) {
		struct scatterlist *sg;

		sg = (struct scatterlist *) cmd->request_buffer;
		kunmap_atomic(buf - sg->offset, KM_IRQ0);
	}
}
/*
 * Until we scan the entire bus with inquiries, go through this fella...
 */
static void ourdone(struct scsi_cmnd *Cmnd)
{
	struct qlogicpti *qpti = (struct qlogicpti *) Cmnd->device->host->hostdata;
	int tgt = Cmnd->device->id;
	void (*done) (struct scsi_cmnd *);

	/* This grot added by DaveM, blame him for ugliness.
	 * The issue is that in the 2.3.x driver we use the
	 * host_scribble portion of the scsi command as a
	 * completion linked list at interrupt service time,
	 * so we have to store the done function pointer elsewhere.
	 */
	done = (void (*)(struct scsi_cmnd *))
		(((unsigned long) Cmnd->SCp.Message)
#ifdef CONFIG_SPARC64
		 | ((unsigned long) Cmnd->SCp.Status << 32UL)
#endif
		);

	if ((qpti->sbits & (1 << tgt)) == 0) {
		int ok = host_byte(Cmnd->result) == DID_OK;

		if (Cmnd->cmnd[0] == 0x12 && ok) {
			unsigned char *iqd;
			unsigned int iqd_len;

			iqd_len = scsi_rbuf_get(Cmnd, &iqd);

			/* tags handled in midlayer */
			/* enable sync mode? */
			if (iqd[7] & 0x10) {
				qpti->dev_param[tgt].device_flags |= 0x10;
			} else {
				qpti->dev_param[tgt].synchronous_offset = 0;
				qpti->dev_param[tgt].synchronous_period = 0;
			}
			/* are we wide capable? */
			if (iqd[7] & 0x20) {
				qpti->dev_param[tgt].device_flags |= 0x20;
			}

			scsi_rbuf_put(Cmnd, iqd);

			qpti->sbits |= (1 << tgt);
		} else if (!ok) {
			qpti->sbits |= (1 << tgt);
		}
	}
	done(Cmnd);
}
static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *));

static int qlogicpti_queuecommand_slow(struct scsi_cmnd *Cmnd,
				       void (*done)(struct scsi_cmnd *))
{
	struct qlogicpti *qpti = (struct qlogicpti *) Cmnd->device->host->hostdata;

	/*
	 * done checking this host adapter?
	 * If not, then rewrite the command
	 * to finish through ourdone so we
	 * can peek at Inquiry data results.
	 */
	if (qpti->sbits && qpti->sbits != 0xffff) {
		/* See the comment in ourdone() about this ugliness... */
		Cmnd->SCp.Message = ((unsigned long)done) & 0xffffffff;
#ifdef CONFIG_SPARC64
		Cmnd->SCp.Status = ((unsigned long)done >> 32UL) & 0xffffffff;
#endif
		return qlogicpti_queuecommand(Cmnd, ourdone);
	}

	/*
	 * We've peeked at all targets for this bus- time
	 * to set parameters for devices for real now.
	 */
	if (qpti->sbits == 0xffff) {
		int i;

		for (i = 0; i < MAX_TARGETS; i++) {
			u_short param[6];

			param[0] = MBOX_SET_TARGET_PARAMS;
			param[1] = (i << 8);
			param[2] = (qpti->dev_param[i].device_flags << 8);
			if (qpti->dev_param[i].device_flags & 0x10) {
				param[3] = (qpti->dev_param[i].synchronous_offset << 8) |
					qpti->dev_param[i].synchronous_period;
			} else {
				param[3] = 0;
			}
			(void) qlogicpti_mbox_command(qpti, param, 0);
		}
		/*
		 * set to zero so any traverse through ourdone
		 * doesn't start the whole process again.
		 */
		qpti->sbits = 0;
	}

	/* check to see if we're done with all adapters... */
	for (qpti = qptichain; qpti != NULL; qpti = qpti->next) {
		if (qpti->sbits)
			break;
	}

	/*
	 * if we hit the end of the chain w/o finding adapters still
	 * capability-configuring, then we're done with all adapters
	 * and can switch to the fast queuecommand for good.
	 */
	if (qpti == NULL)
		Cmnd->device->host->hostt->queuecommand = qlogicpti_queuecommand;

	return qlogicpti_queuecommand(Cmnd, done);
}
/*
 * The middle SCSI layer ensures that queuecommand never gets invoked
 * concurrently with itself or the interrupt handler (though the
 * interrupt handler may call this routine as part of
 * request-completion handling).
 *
 * "This code must fly." -davem
 */
static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = Cmnd->device->host;
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	struct Command_Entry *cmd;
	u_int out_ptr;
	int in_ptr;

	Cmnd->scsi_done = done;

	in_ptr = qpti->req_in_ptr;
	cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
	out_ptr = sbus_readw(qpti->qregs + MBOX4);
	in_ptr = NEXT_REQ_PTR(in_ptr);
	if (in_ptr == out_ptr)
		goto toss_command;

	if (qpti->send_marker) {
		marker_frob(cmd);
		qpti->send_marker = 0;
		if (NEXT_REQ_PTR(in_ptr) == out_ptr) {
			sbus_writew(in_ptr, qpti->qregs + MBOX4);
			qpti->req_in_ptr = in_ptr;
			goto toss_command;
		}
		cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
		in_ptr = NEXT_REQ_PTR(in_ptr);
	}
	cmd_frob(cmd, Cmnd, qpti);
	if ((in_ptr = load_cmd(Cmnd, cmd, qpti, in_ptr, out_ptr)) == -1)
		goto toss_command;

	update_can_queue(host, in_ptr, out_ptr);

	return 0;

toss_command:
	printk(KERN_EMERG "qlogicpti%d: request queue overflow\n",
	       qpti->qpti_id);

	/* Unfortunately, unless you use the new EH code, which
	 * we don't, the midlayer will ignore the return value,
	 * which is insane.  We pick up the pieces like this.
	 */
	Cmnd->result = DID_BUS_BUSY << 16;
	done(Cmnd);
	return 1;
}
static int qlogicpti_return_status(struct Status_Entry *sts, int id)
{
	int host_status = DID_ERROR;

	switch (sts->completion_status) {
	case CS_COMPLETE:
		host_status = DID_OK;
		break;
	case CS_INCOMPLETE:
		if (!(sts->state_flags & SF_GOT_BUS))
			host_status = DID_NO_CONNECT;
		else if (!(sts->state_flags & SF_GOT_TARGET))
			host_status = DID_BAD_TARGET;
		else if (!(sts->state_flags & SF_SENT_CDB))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_TRANSFERRED_DATA))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_GOT_STATUS))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_GOT_SENSE))
			host_status = DID_ERROR;
		break;
	case CS_TRANSPORT_ERROR:
		host_status = DID_ERROR;
		break;
	case CS_RESET_OCCURRED:
		host_status = DID_RESET;
		break;
	case CS_ABORTED:
		host_status = DID_ABORT;
		break;
	case CS_TIMEOUT:
		host_status = DID_TIME_OUT;
		break;
	case CS_DATA_OVERRUN:
	case CS_COMMAND_OVERRUN:
	case CS_STATUS_OVERRUN:
	case CS_BAD_MESSAGE:
	case CS_NO_MESSAGE_OUT:
	case CS_EXT_ID_FAILED:
	case CS_IDE_MSG_FAILED:
	case CS_ABORT_MSG_FAILED:
	case CS_NOP_MSG_FAILED:
	case CS_PARITY_ERROR_MSG_FAILED:
	case CS_DEVICE_RESET_MSG_FAILED:
	case CS_ID_MSG_FAILED:
	case CS_UNEXP_BUS_FREE:
		host_status = DID_ERROR;
		break;
	case CS_DATA_UNDERRUN:
		host_status = DID_OK;
		break;
	default:
		printk(KERN_EMERG "qpti%d: unknown completion status 0x%04x\n",
		       id, sts->completion_status);
		host_status = DID_ERROR;
		break;
	}

	return (sts->scsi_status & STATUS_MASK) | (host_status << 16);
}
static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
{
	struct scsi_cmnd *Cmnd, *done_queue = NULL;
	struct Status_Entry *sts;
	u_int in_ptr, out_ptr;

	if (!(sbus_readw(qpti->qregs + SBUS_STAT) & SBUS_STAT_RINT))
		return NULL;

	in_ptr = sbus_readw(qpti->qregs + MBOX5);
	sbus_writew(HCCTRL_CRIRQ, qpti->qregs + HCCTRL);
	if (sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK) {
		switch (sbus_readw(qpti->qregs + MBOX0)) {
		case ASYNC_SCSI_BUS_RESET:
		case EXECUTION_TIMEOUT_RESET:
			qpti->send_marker = 1;
			break;
		case INVALID_COMMAND:
		case HOST_INTERFACE_ERROR:
		case COMMAND_PARAM_ERROR:
		default:
			break;
		}
		sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
	}

	/* This looks like a network driver! */
	out_ptr = qpti->res_out_ptr;
	while (out_ptr != in_ptr) {
		u_int cmd_slot;

		sts = (struct Status_Entry *) &qpti->res_cpu[out_ptr];
		out_ptr = NEXT_RES_PTR(out_ptr);

		/* We store an index in the handle, not the pointer in
		 * some form.  This avoids problems due to the fact
		 * that the handle provided is only 32-bits. -DaveM
		 */
		cmd_slot = sts->handle;
		Cmnd = qpti->cmd_slots[cmd_slot];
		qpti->cmd_slots[cmd_slot] = NULL;

		if (sts->completion_status == CS_RESET_OCCURRED ||
		    sts->completion_status == CS_ABORTED ||
		    (sts->status_flags & STF_BUS_RESET))
			qpti->send_marker = 1;

		if (sts->state_flags & SF_GOT_SENSE)
			memcpy(Cmnd->sense_buffer, sts->req_sense_data,
			       sizeof(Cmnd->sense_buffer));

		if (sts->hdr.entry_type == ENTRY_STATUS)
			Cmnd->result =
				qlogicpti_return_status(sts, qpti->qpti_id);
		else
			Cmnd->result = DID_ERROR << 16;

		if (Cmnd->use_sg) {
			sbus_unmap_sg(qpti->sdev,
				      (struct scatterlist *)Cmnd->request_buffer,
				      Cmnd->use_sg,
				      Cmnd->sc_data_direction);
		} else if (Cmnd->request_bufflen) {
			sbus_unmap_single(qpti->sdev,
					  (__u32)((unsigned long)Cmnd->SCp.ptr),
					  Cmnd->request_bufflen,
					  Cmnd->sc_data_direction);
		}
		qpti->cmd_count[Cmnd->device->id]--;
		sbus_writew(out_ptr, qpti->qregs + MBOX5);
		Cmnd->host_scribble = (unsigned char *) done_queue;
		done_queue = Cmnd;
	}
	qpti->res_out_ptr = out_ptr;

	return done_queue;
}
static irqreturn_t qpti_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct qlogicpti *qpti = dev_id;
	unsigned long flags;
	struct scsi_cmnd *dq;

	spin_lock_irqsave(qpti->qhost->host_lock, flags);
	dq = qlogicpti_intr_handler(qpti);

	if (dq != NULL) {
		do {
			struct scsi_cmnd *next;

			next = (struct scsi_cmnd *) dq->host_scribble;
			dq->scsi_done(dq);
			dq = next;
		} while (dq != NULL);
	}
	spin_unlock_irqrestore(qpti->qhost->host_lock, flags);

	return IRQ_HANDLED;
}
static int qlogicpti_abort(struct scsi_cmnd *Cmnd)
{
	u_short param[6];
	struct Scsi_Host *host = Cmnd->device->host;
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	int return_status = SUCCESS;
	u32 cmd_cookie;
	int i;

	printk(KERN_WARNING "qlogicpti : Aborting cmd for tgt[%d] lun[%d]\n",
	       (int)Cmnd->device->id, (int)Cmnd->device->lun);

	qlogicpti_disable_irqs(qpti);

	/* Find the 32-bit cookie we gave to the firmware for
	 * this command.
	 */
	for (i = 0; i < QLOGICPTI_REQ_QUEUE_LEN + 1; i++)
		if (qpti->cmd_slots[i] == Cmnd)
			break;
	cmd_cookie = i;

	param[0] = MBOX_ABORT;
	param[1] = (((u_short) Cmnd->device->id) << 8) | Cmnd->device->lun;
	param[2] = cmd_cookie >> 16;
	param[3] = cmd_cookie & 0xffff;
	if (qlogicpti_mbox_command(qpti, param, 0) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti : scsi abort failure: %x\n", param[0]);
		return_status = FAILED;
	}

	qlogicpti_enable_irqs(qpti);

	return return_status;
}
static int qlogicpti_reset(struct scsi_cmnd *Cmnd)
{
	u_short param[6];
	struct Scsi_Host *host = Cmnd->device->host;
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	int return_status = SUCCESS;

	printk(KERN_WARNING "qlogicpti : Resetting SCSI bus!\n");

	qlogicpti_disable_irqs(qpti);

	param[0] = MBOX_BUS_RESET;
	param[1] = qpti->host_param.bus_reset_delay;
	if (qlogicpti_mbox_command(qpti, param, 0) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti : scsi bus reset failure: %x\n", param[0]);
		return_status = FAILED;
	}

	qlogicpti_enable_irqs(qpti);

	return return_status;
}
static struct scsi_host_template qpti_template = {
	.module			= THIS_MODULE,
	.name			= "qlogicpti",
	.info			= qlogicpti_info,
	.queuecommand		= qlogicpti_queuecommand_slow,
	.eh_abort_handler	= qlogicpti_abort,
	.eh_bus_reset_handler	= qlogicpti_reset,
	.can_queue		= QLOGICPTI_REQ_QUEUE_LEN,
	.sg_tablesize		= QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
	.use_clustering		= ENABLE_CLUSTERING,
};
static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_device_id *match)
{
	struct sbus_dev *sdev = to_sbus_device(&dev->dev);
	struct device_node *dp = dev->node;
	struct scsi_host_template *tpnt = match->data;
	struct Scsi_Host *host;
	struct qlogicpti *qpti;
	const char *fcode;

	/* Sometimes Antares cards come up not completely
	 * setup, and we get a report of a zero IRQ.
	 */
	if (sdev->irqs[0] == 0)
		return -ENODEV;

	host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti));
	if (!host)
		return -ENOMEM;

	qpti = (struct qlogicpti *) host->hostdata;

	host->max_id = MAX_TARGETS;
	qpti->qhost = host;
	qpti->sdev = sdev;
	qpti->qpti_id = nqptis;
	qpti->prom_node = sdev->prom_node;
	strcpy(qpti->prom_name, sdev->ofdev.node->name);
	qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp");

	if (qpti_map_regs(qpti) < 0)
		goto fail_unlink;

	if (qpti_register_irq(qpti) < 0)
		goto fail_unmap_regs;

	qpti_get_scsi_id(qpti);
	qpti_get_bursts(qpti);
	qpti_get_clock(qpti);

	/* Clear out scsi_cmnd array. */
	memset(qpti->cmd_slots, 0, sizeof(qpti->cmd_slots));

	if (qpti_map_queues(qpti) < 0)
		goto fail_free_irq;

	/* Load the firmware. */
	if (qlogicpti_load_firmware(qpti))
		goto fail_unmap_queues;

	/* Check the PTI status reg. */
	if (qlogicpti_verify_tmon(qpti))
		goto fail_unmap_queues;

	/* Reset the ISP and init res/req queues. */
	if (qlogicpti_reset_hardware(host))
		goto fail_unmap_queues;

	if (scsi_add_host(host, &dev->dev))
		goto fail_unmap_queues;

	printk("(Firmware v%d.%d.%d)", qpti->fware_majrev,
	       qpti->fware_minrev, qpti->fware_micrev);

	fcode = of_get_property(dp, "isp-fcode", NULL);
	if (fcode && fcode[0])
		printk("(Firmware %s)", fcode);
	if (of_find_property(dp, "differential", NULL) != NULL)
		qpti->differential = 1;

	printk(" [%s Wide, using %s interface]\n",
	       (qpti->ultra ? "Ultra" : "Fast"),
	       (qpti->differential ? "differential" : "single ended"));

	dev_set_drvdata(&sdev->ofdev.dev, qpti);

	qpti_chain_add(qpti);

	scsi_scan_host(host);
	nqptis++;

	return 0;

fail_unmap_queues:
#define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
	sbus_free_consistent(qpti->sdev,
			     QSIZE(RES_QUEUE_LEN),
			     qpti->res_cpu, qpti->res_dvma);
	sbus_free_consistent(qpti->sdev,
			     QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
			     qpti->req_cpu, qpti->req_dvma);

fail_free_irq:
	free_irq(qpti->irq, qpti);

fail_unmap_regs:
	sbus_iounmap(qpti->qregs,
		     qpti->sdev->reg_addrs[0].reg_size);
	sbus_iounmap(qpti->sreg, sizeof(unsigned char));

fail_unlink:
	scsi_host_put(host);

	return -ENODEV;
}
static int __devexit qpti_sbus_remove(struct of_device *dev)
{
	struct qlogicpti *qpti = dev_get_drvdata(&dev->dev);

	qpti_chain_del(qpti);

	scsi_remove_host(qpti->qhost);

	/* Shut up the card. */
	sbus_writew(0, qpti->qregs + SBUS_CTRL);

	/* Free IRQ handler and unmap Qlogic,ISP and PTI status regs. */
	free_irq(qpti->irq, qpti);

#define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
	sbus_free_consistent(qpti->sdev,
			     QSIZE(RES_QUEUE_LEN),
			     qpti->res_cpu, qpti->res_dvma);
	sbus_free_consistent(qpti->sdev,
			     QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
			     qpti->req_cpu, qpti->req_dvma);

	sbus_iounmap(qpti->qregs, qpti->sdev->reg_addrs[0].reg_size);
	sbus_iounmap(qpti->sreg, sizeof(unsigned char));

	scsi_host_put(qpti->qhost);

	return 0;
}
static struct of_device_id qpti_match[] = {
	{
		.name = "ptisp",
		.data = &qpti_template,
	},
	{
		.name = "PTI,ptisp",
		.data = &qpti_template,
	},
	{
		.name = "QLGC,isp",
		.data = &qpti_template,
	},
	{
		.name = "SUNW,isp",
		.data = &qpti_template,
	},
	{},
};
MODULE_DEVICE_TABLE(of, qpti_match);
static struct of_platform_driver qpti_sbus_driver = {
	.match_table	= qpti_match,
	.probe		= qpti_sbus_probe,
	.remove		= __devexit_p(qpti_sbus_remove),
};

static int __init qpti_init(void)
{
	return of_register_driver(&qpti_sbus_driver, &sbus_bus_type);
}

static void __exit qpti_exit(void)
{
	of_unregister_driver(&qpti_sbus_driver);
}

MODULE_DESCRIPTION("QlogicISP SBUS driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.0");

module_init(qpti_init);
module_exit(qpti_exit);