1 /* -*- mode: c; c-basic-offset: 8 -*- */
3 /* NCR (or Symbios) 53c700 and 53c700-66 Driver
5 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
6 **-----------------------------------------------------------------------------
7 **
8 ** This program is free software; you can redistribute it and/or modify
9 ** it under the terms of the GNU General Public License as published by
10 ** the Free Software Foundation; either version 2 of the License, or
11 ** (at your option) any later version.
13 ** This program is distributed in the hope that it will be useful,
14 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 ** GNU General Public License for more details.
18 ** You should have received a copy of the GNU General Public License
19 ** along with this program; if not, write to the Free Software
20 ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 **-----------------------------------------------------------------------------
25 /* Notes:
27 * This driver is designed exclusively for these chips (virtually the
28 * earliest of the scripts engine chips). They need their own drivers
29 * because they are missing so many of the scripts and snazzy register
30 * features of their elder brothers (the 710, 720 and 770).
32  * The 700 is the lowliest of the line: it can only do async SCSI.
33 * The 700-66 can at least do synchronous SCSI up to 10MHz.
35 * The 700 chip has no host bus interface logic of its own. However,
36 * it is usually mapped to a location with well defined register
37 * offsets. Therefore, if you can determine the base address and the
38 * irq your board incorporating this chip uses, you can probably use
39 * this driver to run it (although you'll probably have to write a
40 * minimal wrapper for the purpose---see the NCR_D700 driver for
41 * details about how to do this).
44 * TODO List:
46 * 1. Better statistics in the proc fs
48 * 2. Implement message queue (queues SCSI messages like commands) and make
49 * the abort and device reset functions use them.
50 * */
52 /* CHANGELOG
54 * Version 2.8
56 * Fixed bad bug affecting tag starvation processing (previously the
57  * driver would hang the system if too many tags starved).  Also fixed
58 * bad bug having to do with 10 byte command processing and REQUEST
59 * SENSE (the command would loop forever getting a transfer length
60 * mismatch in the CMD phase).
62 * Version 2.7
64 * Fixed scripts problem which caused certain devices (notably CDRWs)
65 * to hang on initial INQUIRY. Updated NCR_700_readl/writel to use
66 * __raw_readl/writel for parisc compatibility (Thomas
67 * Bogendoerfer). Added missing SCp->request_bufflen initialisation
68 * for sense requests (Ryan Bradetich).
70 * Version 2.6
72 * Following test of the 64 bit parisc kernel by Richard Hirst,
73 * several problems have now been corrected. Also adds support for
74 * consistent memory allocation.
76 * Version 2.5
78 * More Compatibility changes for 710 (now actually works). Enhanced
79 * support for odd clock speeds which constrain SDTR negotiations.
80  * Correct cacheline separation for SCSI messages and status for
81 * incoherent architectures. Use of the pci mapping functions on
82 * buffers to begin support for 64 bit drivers.
84 * Version 2.4
86 * Added support for the 53c710 chip (in 53c700 emulation mode only---no
87 * special 53c710 instructions or registers are used).
89 * Version 2.3
91 * More endianness/cache coherency changes.
93 * Better bad device handling (handles devices lying about tag
94 * queueing support and devices which fail to provide sense data on
95 * contingent allegiance conditions)
97 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
98 * debugging this driver on the parisc architecture and suggesting
99 * many improvements and bug fixes.
101 * Thanks also go to Linuxcare Inc. for providing several PARISC
102 * machines for me to debug the driver on.
104 * Version 2.2
106 * Made the driver mem or io mapped; added endian invariance; added
107 * dma cache flushing operations for architectures which need it;
108 * added support for more varied clocking speeds.
110 * Version 2.1
112 * Initial modularisation from the D700. See NCR_D700.c for the rest of
113 * the changelog.
114 * */
115 #define NCR_700_VERSION "2.8"
117 #include <linux/config.h>
118 #include <linux/kernel.h>
119 #include <linux/types.h>
120 #include <linux/string.h>
121 #include <linux/ioport.h>
122 #include <linux/delay.h>
123 #include <linux/spinlock.h>
124 #include <linux/completion.h>
125 #include <linux/sched.h>
126 #include <linux/init.h>
127 #include <linux/proc_fs.h>
128 #include <linux/blkdev.h>
129 #include <linux/module.h>
130 #include <linux/interrupt.h>
131 #include <asm/dma.h>
132 #include <asm/system.h>
133 #include <asm/io.h>
134 #include <asm/pgtable.h>
135 #include <asm/byteorder.h>
137 #include <scsi/scsi.h>
138 #include <scsi/scsi_cmnd.h>
139 #include <scsi/scsi_dbg.h>
140 #include <scsi/scsi_eh.h>
141 #include <scsi/scsi_host.h>
142 #include <scsi/scsi_tcq.h>
143 #include <scsi/scsi_transport.h>
144 #include <scsi/scsi_transport_spi.h>
146 #include "53c700.h"
148 /* NOTE: For 64 bit drivers there are points in the code where we use
149 * a non dereferenceable pointer to point to a structure in dma-able
150 * memory (which is 32 bits) so that we can use all of the structure
151 * operations but take the address at the end. This macro allows us
152 * to truncate the 64 bit pointer down to 32 bits without the compiler
153 * complaining */
154 #define to32bit(x) ((__u32)((unsigned long)(x)))
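/* For example, to32bit(&slot->pSG[0].ins) below produces the 32-bit bus
 * address of a slot's SG script fragment so that it can be patched into
 * the SGScriptStartAddress label of the script. */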
156 #ifdef NCR_700_DEBUG
157 #define STATIC
158 #else
159 #define STATIC static
160 #endif
162 MODULE_AUTHOR("James Bottomley");
163 MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
164 MODULE_LICENSE("GPL");
166 /* This is the script */
167 #include "53c700_d.h"
170 STATIC int NCR_700_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
171 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
172 STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
173 STATIC int NCR_700_dev_reset(struct scsi_cmnd * SCpnt);
174 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
175 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
176 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
177 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
178 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
180 STATIC struct device_attribute *NCR_700_dev_attrs[];
182 STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
184 static char *NCR_700_phase[] = {
186 "after selection",
187 "before command phase",
188 "after command phase",
189 "after status phase",
190 "after data in phase",
191 "after data out phase",
192 "during data phase",
195 static char *NCR_700_condition[] = {
197 "NOT MSG_OUT",
198 "UNEXPECTED PHASE",
199 "NOT MSG_IN",
200 "UNEXPECTED MSG",
201 "MSG_IN",
202 "SDTR_MSG RECEIVED",
203 "REJECT_MSG RECEIVED",
204 "DISCONNECT_MSG RECEIVED",
205 "MSG_OUT",
206 "DATA_IN",
210 static char *NCR_700_fatal_messages[] = {
211 "unexpected message after reselection",
212 "still MSG_OUT after message injection",
213 "not MSG_IN after selection",
214 "Illegal message length received",
217 static char *NCR_700_SBCL_bits[] = {
218 "IO ",
219 "CD ",
220 "MSG ",
221 "ATN ",
222 "SEL ",
223 "BSY ",
224 "ACK ",
225 "REQ ",
228 static char *NCR_700_SBCL_to_phase[] = {
229 "DATA_OUT",
230 "DATA_IN",
231 "CMD_OUT",
232 "STATE",
233 "ILLEGAL PHASE",
234 "ILLEGAL PHASE",
235 "MSG OUT",
236 "MSG IN",
239 static __u8 NCR_700_SDTR_msg[] = {
240 0x01, /* Extended message */
241 0x03, /* Extended message Length */
242 0x01, /* SDTR Extended message */
243 NCR_700_MIN_PERIOD,
244 NCR_700_MAX_OFFSET
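/* NCR_700_start_command() copies this template into the outgoing message
 * buffer and then overwrites bytes 3 and 4 with the device's current
 * spi_period and spi_offset before the message is sent. */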
247 /* This translates the SDTR message offset and period to a value
248 * which can be loaded into the SXFER_REG.
250 * NOTE: According to SCSI-2, the true transfer period (in ns) is
251 * actually four times this period value */
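/* A worked example (numbers chosen purely for illustration): with
 * hostdata->sync_clock = 50 (MHz) and a negotiated period of 50 (i.e.
 * 200ns) at offset 8, XFERP = (200 * 50)/1000 - 4 = 6, so the routine
 * below returns (8 & 0x0f) | (6 & 0x07) << 4 = 0x68. */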
252 static inline __u8
253 NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
254 __u8 offset, __u8 period)
256 int XFERP;
258 __u8 min_xferp = (hostdata->chip710
259 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
260 __u8 max_offset = (hostdata->chip710
261 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
263 if(offset == 0)
264 return 0;
266 if(period < hostdata->min_period) {
267 printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, hostdata->min_period*4);
268 period = hostdata->min_period;
270 XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
271 if(offset > max_offset) {
272 printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
273 offset, max_offset);
274 offset = max_offset;
276 if(XFERP < min_xferp) {
277 printk(KERN_WARNING "53c700: XFERP %d is less than minimum, setting to %d\n",
278 XFERP, min_xferp);
279 XFERP = min_xferp;
281 return (offset & 0x0f) | (XFERP & 0x07)<<4;
284 static inline __u8
285 NCR_700_get_SXFER(struct scsi_device *SDp)
287 struct NCR_700_Host_Parameters *hostdata =
288 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
290 return NCR_700_offset_period_to_sxfer(hostdata, spi_offset(SDp),
291 spi_period(SDp));
294 struct Scsi_Host *
295 NCR_700_detect(struct scsi_host_template *tpnt,
296 struct NCR_700_Host_Parameters *hostdata)
298 dma_addr_t pScript, pSlots;
299 __u8 *memory;
300 __u32 *script;
301 struct Scsi_Host *host;
302 static int banner = 0;
303 int j;
305 if(tpnt->sdev_attrs == NULL)
306 tpnt->sdev_attrs = NCR_700_dev_attrs;
308 memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
309 &pScript, GFP_KERNEL);
310 if(memory == NULL) {
311 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
312 return NULL;
315 script = (__u32 *)memory;
316 hostdata->msgin = memory + MSGIN_OFFSET;
317 hostdata->msgout = memory + MSGOUT_OFFSET;
318 hostdata->status = memory + STATUS_OFFSET;
319 /* all of these offsets are L1_CACHE_BYTES separated. It is fatal
320 * if this isn't sufficient separation to avoid dma flushing issues */
321 BUG_ON(!dma_is_consistent(pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
322 hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
324 pSlots = pScript + SLOTS_OFFSET;
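/* The single dma_alloc_noncoherent() block is carved up by the *_OFFSET
 * constants from 53c700.h: the SCRIPTS code sits at the start of the block
 * and the msgin/msgout/status buffers and command slots live at the various
 * offsets, separated by at least L1_CACHE_BYTES as noted above.  pScript
 * and pSlots are the matching bus addresses used when the script is
 * patched below. */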
326 /* Fill in the missing routines from the host template */
327 tpnt->queuecommand = NCR_700_queuecommand;
328 tpnt->eh_abort_handler = NCR_700_abort;
329 tpnt->eh_device_reset_handler = NCR_700_dev_reset;
330 tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
331 tpnt->eh_host_reset_handler = NCR_700_host_reset;
332 tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
333 tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
334 tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
335 tpnt->use_clustering = ENABLE_CLUSTERING;
336 tpnt->slave_configure = NCR_700_slave_configure;
337 tpnt->slave_destroy = NCR_700_slave_destroy;
339 if(tpnt->name == NULL)
340 tpnt->name = "53c700";
341 if(tpnt->proc_name == NULL)
342 tpnt->proc_name = "53c700";
345 host = scsi_host_alloc(tpnt, 4);
346 if (!host)
347 return NULL;
348 memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
349 * NCR_700_COMMAND_SLOTS_PER_HOST);
350 for(j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
351 dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
352 - (unsigned long)&hostdata->slots[0].SG[0]);
353 hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
354 if(j == 0)
355 hostdata->free_list = &hostdata->slots[j];
356 else
357 hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
358 hostdata->slots[j].state = NCR_700_SLOT_FREE;
361 for(j = 0; j < sizeof(SCRIPT)/sizeof(SCRIPT[0]); j++) {
362 script[j] = bS_to_host(SCRIPT[j]);
365 /* adjust all labels to be bus physical */
366 for(j = 0; j < PATCHES; j++) {
367 script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
369 /* now patch up fixed addresses. */
370 script_patch_32(script, MessageLocation,
371 pScript + MSGOUT_OFFSET);
372 script_patch_32(script, StatusAddress,
373 pScript + STATUS_OFFSET);
374 script_patch_32(script, ReceiveMsgAddress,
375 pScript + MSGIN_OFFSET);
377 hostdata->script = script;
378 hostdata->pScript = pScript;
379 dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
380 hostdata->state = NCR_700_HOST_FREE;
381 hostdata->cmd = NULL;
382 host->max_id = 7;
383 host->max_lun = NCR_700_MAX_LUNS;
384 BUG_ON(NCR_700_transport_template == NULL);
385 host->transportt = NCR_700_transport_template;
386 host->unique_id = hostdata->base;
387 host->base = hostdata->base;
388 hostdata->eh_complete = NULL;
389 host->hostdata[0] = (unsigned long)hostdata;
390 /* kick the chip */
391 NCR_700_writeb(0xff, host, CTEST9_REG);
392 if(hostdata->chip710)
393 hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
394 else
395 hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
396 hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
397 if(banner == 0) {
398 printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
399 banner = 1;
401 printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
402 hostdata->chip710 ? "53c710" :
403 (hostdata->fast ? "53c700-66" : "53c700"),
404 hostdata->rev, hostdata->differential ?
405 "(Differential)" : "");
406 /* reset the chip */
407 NCR_700_chip_reset(host);
409 return host;
413 NCR_700_release(struct Scsi_Host *host)
415 struct NCR_700_Host_Parameters *hostdata =
416 (struct NCR_700_Host_Parameters *)host->hostdata[0];
418 dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
419 hostdata->script, hostdata->pScript);
420 return 1;
423 static inline __u8
424 NCR_700_identify(int can_disconnect, __u8 lun)
426 return IDENTIFY_BASE |
427 ((can_disconnect) ? 0x40 : 0) |
428 (lun & NCR_700_LUN_MASK);
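/* IDENTIFY_BASE is 0x80, so (for example) a disconnect-capable command to
 * LUN 2 yields 0x80 | 0x40 | 0x02 = 0xC2, while NCR_700_identify(0, 0)
 * is plain 0x80. */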
432 * Function : static int data_residual (Scsi_Host *host)
434 * Purpose : return residual data count of what's in the chip. If you
435 * really want to know what this function is doing, it's almost a
436 * direct transcription of the algorithm described in the 53c710
437 * guide, except that the DBC and DFIFO registers are only 6 bits
438 * wide on a 53c700.
440 * Inputs : host - SCSI host */
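/* Illustrative only: on a 710, with the low seven bits of DFIFO_REG reading
 * 0x12 and the low seven bits of DBC_REG reading 0x0e, the FIFO holds
 * (0x12 - 0x0e) & 0x7f = 4 bytes; one or two further bytes may then be
 * added for data latched in SIDL/SODL/SODR depending on the direction and
 * whether the transfer is synchronous. */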
441 static inline int
442 NCR_700_data_residual (struct Scsi_Host *host) {
443 struct NCR_700_Host_Parameters *hostdata =
444 (struct NCR_700_Host_Parameters *)host->hostdata[0];
445 int count, synchronous = 0;
446 unsigned int ddir;
448 if(hostdata->chip710) {
449 count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
450 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
451 } else {
452 count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
453 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
456 if(hostdata->fast)
457 synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
459 /* get the data direction */
460 ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
462 if (ddir) {
463 /* Receive */
464 if (synchronous)
465 count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
466 else
467 if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
468 ++count;
469 } else {
470 /* Send */
471 __u8 sstat = NCR_700_readb(host, SSTAT1_REG);
472 if (sstat & SODL_REG_FULL)
473 ++count;
474 if (synchronous && (sstat & SODR_REG_FULL))
475 ++count;
477 #ifdef NCR_700_DEBUG
478 if(count)
479 printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
480 #endif
481 return count;
484 /* print out the SCSI wires and corresponding phase from the SBCL register
485 * in the chip */
486 static inline char *
487 sbcl_to_string(__u8 sbcl)
489 int i;
490 static char ret[256];
492 ret[0]='\0';
493 for(i=0; i<8; i++) {
494 if((1<<i) & sbcl)
495 strcat(ret, NCR_700_SBCL_bits[i]);
497 strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
498 return ret;
501 static inline __u8
502 bitmap_to_number(__u8 bitmap)
504 __u8 i;
506 for(i=0; i<8 && !(bitmap &(1<<i)); i++)
508 return i;
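/* e.g. a bitmap of 0x20 (bit 5 set) returns 5; a zero bitmap falls all the
 * way through the loop and returns 8. */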
511 /* Pull a slot off the free list */
512 STATIC struct NCR_700_command_slot *
513 find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
515 struct NCR_700_command_slot *slot = hostdata->free_list;
517 if(slot == NULL) {
518 /* sanity check */
519 if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
520 printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
521 return NULL;
524 if(slot->state != NCR_700_SLOT_FREE)
525 /* should panic! */
526 printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
529 hostdata->free_list = slot->ITL_forw;
530 slot->ITL_forw = NULL;
533 /* NOTE: set the state to busy here, not queued, since this
534 * indicates the slot is in use and cannot be run by the IRQ
535 * finish routine. If we cannot queue the command when it
536  * is properly built, we then change to NCR_700_SLOT_QUEUED */
537 slot->state = NCR_700_SLOT_BUSY;
538 hostdata->command_slot_count++;
540 return slot;
543 STATIC void
544 free_slot(struct NCR_700_command_slot *slot,
545 struct NCR_700_Host_Parameters *hostdata)
547 if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
548 printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
550 if(slot->state == NCR_700_SLOT_FREE) {
551 printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
554 slot->resume_offset = 0;
555 slot->cmnd = NULL;
556 slot->state = NCR_700_SLOT_FREE;
557 slot->ITL_forw = hostdata->free_list;
558 hostdata->free_list = slot;
559 hostdata->command_slot_count--;
563 /* This routine really does very little. The command is indexed on
564 the ITL and (if tagged) the ITLQ lists in _queuecommand */
565 STATIC void
566 save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
567 struct scsi_cmnd *SCp, __u32 dsp)
569 /* It's just possible that this gets executed twice */
570 if(SCp != NULL) {
571 struct NCR_700_command_slot *slot =
572 (struct NCR_700_command_slot *)SCp->host_scribble;
574 slot->resume_offset = dsp;
576 hostdata->state = NCR_700_HOST_FREE;
577 hostdata->cmd = NULL;
580 STATIC inline void
581 NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
582 struct NCR_700_command_slot *slot)
584 if(SCp->sc_data_direction != DMA_NONE &&
585 SCp->sc_data_direction != DMA_BIDIRECTIONAL) {
586 if(SCp->use_sg) {
587 dma_unmap_sg(hostdata->dev, SCp->buffer,
588 SCp->use_sg, SCp->sc_data_direction);
589 } else {
590 dma_unmap_single(hostdata->dev, slot->dma_handle,
591 SCp->request_bufflen,
592 SCp->sc_data_direction);
597 STATIC inline void
598 NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
599 struct scsi_cmnd *SCp, int result)
601 hostdata->state = NCR_700_HOST_FREE;
602 hostdata->cmd = NULL;
604 if(SCp != NULL) {
605 struct NCR_700_command_slot *slot =
606 (struct NCR_700_command_slot *)SCp->host_scribble;
608 NCR_700_unmap(hostdata, SCp, slot);
609 dma_unmap_single(hostdata->dev, slot->pCmd,
610 sizeof(SCp->cmnd), DMA_TO_DEVICE);
611 if(SCp->cmnd[0] == REQUEST_SENSE && SCp->cmnd[6] == NCR_700_INTERNAL_SENSE_MAGIC) {
612 #ifdef NCR_700_DEBUG
613 printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
614 SCp, SCp->cmnd[7], result);
615 scsi_print_sense("53c700", SCp);
617 #endif
618 /* restore the old result if the request sense was
619 * successful */
620 if(result == 0)
621 result = SCp->cmnd[7];
622 /* now restore the original command */
623 memcpy((void *) SCp->cmnd, (void *) SCp->data_cmnd,
624 sizeof(SCp->data_cmnd));
625 SCp->request_buffer = SCp->buffer;
626 SCp->request_bufflen = SCp->bufflen;
627 SCp->use_sg = SCp->old_use_sg;
628 SCp->cmd_len = SCp->old_cmd_len;
629 SCp->sc_data_direction = SCp->sc_old_data_direction;
630 SCp->underflow = SCp->old_underflow;
633 free_slot(slot, hostdata);
634 #ifdef NCR_700_DEBUG
635 if(NCR_700_get_depth(SCp->device) == 0 ||
636 NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
637 printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
638 NCR_700_get_depth(SCp->device));
639 #endif /* NCR_700_DEBUG */
640 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
642 SCp->host_scribble = NULL;
643 SCp->result = result;
644 SCp->scsi_done(SCp);
645 } else {
646 printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
651 STATIC void
652 NCR_700_internal_bus_reset(struct Scsi_Host *host)
654 /* Bus reset */
655 NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
656 udelay(50);
657 NCR_700_writeb(0, host, SCNTL1_REG);
661 STATIC void
662 NCR_700_chip_setup(struct Scsi_Host *host)
664 struct NCR_700_Host_Parameters *hostdata =
665 (struct NCR_700_Host_Parameters *)host->hostdata[0];
666 __u32 dcntl_extra = 0;
667 __u8 min_period;
668 __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
670 if(hostdata->chip710) {
671 __u8 burst_disable = hostdata->burst_disable
672 ? BURST_DISABLE : 0;
673 dcntl_extra = COMPAT_700_MODE;
675 NCR_700_writeb(dcntl_extra, host, DCNTL_REG);
676 NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
677 host, DMODE_710_REG);
678 NCR_700_writeb(burst_disable | (hostdata->differential ?
679 DIFF : 0), host, CTEST7_REG);
680 NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
681 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
682 | AUTO_ATN, host, SCNTL0_REG);
683 } else {
684 NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
685 host, DMODE_700_REG);
686 NCR_700_writeb(hostdata->differential ?
687 DIFF : 0, host, CTEST7_REG);
688 if(hostdata->fast) {
689 /* this is for 700-66, does nothing on 700 */
690 NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
691 | GENERATE_RECEIVE_PARITY, host,
692 CTEST8_REG);
693 } else {
694 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
695 | PARITY | AUTO_ATN, host, SCNTL0_REG);
699 NCR_700_writeb(1 << host->this_id, host, SCID_REG);
700 NCR_700_writeb(0, host, SBCL_REG);
701 NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
703 NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
704 | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
706 NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
707 NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
708 if(hostdata->clock > 75) {
709 printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
710 /* do the best we can, but the async clock will be out
711 * of spec: sync divider 2, async divider 3 */
712 DEBUG(("53c700: sync 2 async 3\n"));
713 NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
714 NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
715 hostdata->sync_clock = hostdata->clock/2;
716 } else if(hostdata->clock > 50 && hostdata->clock <= 75) {
717 /* sync divider 1.5, async divider 3 */
718 DEBUG(("53c700: sync 1.5 async 3\n"));
719 NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
720 NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
721 hostdata->sync_clock = hostdata->clock*2;
722 hostdata->sync_clock /= 3;
724 } else if(hostdata->clock > 37 && hostdata->clock <= 50) {
725 /* sync divider 1, async divider 2 */
726 DEBUG(("53c700: sync 1 async 2\n"));
727 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
728 NCR_700_writeb(ASYNC_DIV_2_0 | dcntl_extra, host, DCNTL_REG);
729 hostdata->sync_clock = hostdata->clock;
730 } else if(hostdata->clock > 25 && hostdata->clock <=37) {
731 /* sync divider 1, async divider 1.5 */
732 DEBUG(("53c700: sync 1 async 1.5\n"));
733 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
734 NCR_700_writeb(ASYNC_DIV_1_5 | dcntl_extra, host, DCNTL_REG);
735 hostdata->sync_clock = hostdata->clock;
736 } else {
737 DEBUG(("53c700: sync 1 async 1\n"));
738 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
739 NCR_700_writeb(ASYNC_DIV_1_0 | dcntl_extra, host, DCNTL_REG);
740 /* sync divider 1, async divider 1 */
741 hostdata->sync_clock = hostdata->clock;
743 /* Calculate the actual minimum period that can be supported
744 * by our synchronous clock speed. See the 710 manual for
745 * exact details of this calculation which is based on a
746 * setting of the SXFER register */
747 min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
748 hostdata->min_period = NCR_700_MIN_PERIOD;
749 if(min_period > NCR_700_MIN_PERIOD)
750 hostdata->min_period = min_period;
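/* This is just the XFERP formula above inverted.  Purely as an illustration,
 * assuming sync_clock = 50 (MHz) and min_xferp = 4, min_period works out to
 * 1000 * (4 + 4) / (4 * 50) = 40, i.e. an actual minimum transfer period of
 * 40 * 4 = 160ns. */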
753 STATIC void
754 NCR_700_chip_reset(struct Scsi_Host *host)
756 struct NCR_700_Host_Parameters *hostdata =
757 (struct NCR_700_Host_Parameters *)host->hostdata[0];
758 if(hostdata->chip710) {
759 NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
760 udelay(100);
762 NCR_700_writeb(0, host, ISTAT_REG);
763 } else {
764 NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
765 udelay(100);
767 NCR_700_writeb(0, host, DCNTL_REG);
770 mdelay(1000);
772 NCR_700_chip_setup(host);
775 /* The heart of the message processing engine is that the instruction
776 * immediately after the INT is the normal case (and so must be CLEAR
777 * ACK). If we want to do something else, we call that routine in
778 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
779 * ACK) so that the routine returns correctly to resume its activity
780 * */
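/* (temp = dsp + 8 below steps over the single, two 32-bit word, CLEAR ACK
 * instruction that dsp is left pointing at when the INT fires.) */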
781 STATIC __u32
782 process_extended_message(struct Scsi_Host *host,
783 struct NCR_700_Host_Parameters *hostdata,
784 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
786 __u32 resume_offset = dsp, temp = dsp + 8;
787 __u8 pun = 0xff, lun = 0xff;
789 if(SCp != NULL) {
790 pun = SCp->device->id;
791 lun = SCp->device->lun;
794 switch(hostdata->msgin[2]) {
795 case A_SDTR_MSG:
796 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
797 __u8 period = hostdata->msgin[3];
798 __u8 offset = hostdata->msgin[4];
800 if(offset == 0 || period == 0) {
801 offset = 0;
802 period = 0;
805 if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
806 if(spi_offset(SCp->device) != 0)
807 printk(KERN_INFO "scsi%d: (%d:%d) Synchronous at offset %d, period %dns\n",
808 host->host_no, pun, lun,
809 offset, period*4);
810 else
811 printk(KERN_INFO "scsi%d: (%d:%d) Asynchronous\n",
812 host->host_no, pun, lun);
813 NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
816 spi_offset(SCp->device) = offset;
817 spi_period(SCp->device) = period;
820 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
821 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
823 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
824 host, SXFER_REG);
826 } else {
827 /* SDTR message out of the blue, reject it */
828 printk(KERN_WARNING "scsi%d Unexpected SDTR msg\n",
829 host->host_no);
830 hostdata->msgout[0] = A_REJECT_MSG;
831 dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
832 script_patch_16(hostdata->script, MessageCount, 1);
833 /* SendMsgOut returns, so set up the return
834 * address */
835 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
837 break;
839 case A_WDTR_MSG:
840 printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
841 host->host_no, pun, lun);
842 hostdata->msgout[0] = A_REJECT_MSG;
843 dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
844 script_patch_16(hostdata->script, MessageCount, 1);
845 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
847 break;
849 default:
850 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
851 host->host_no, pun, lun,
852 NCR_700_phase[(dsps & 0xf00) >> 8]);
853 scsi_print_msg(hostdata->msgin);
854 printk("\n");
855 /* just reject it */
856 hostdata->msgout[0] = A_REJECT_MSG;
857 dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
858 script_patch_16(hostdata->script, MessageCount, 1);
859 /* SendMsgOut returns, so set up the return
860 * address */
861 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
863 NCR_700_writel(temp, host, TEMP_REG);
864 return resume_offset;
867 STATIC __u32
868 process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata,
869 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
871 /* work out where to return to */
872 __u32 temp = dsp + 8, resume_offset = dsp;
873 __u8 pun = 0xff, lun = 0xff;
875 if(SCp != NULL) {
876 pun = SCp->device->id;
877 lun = SCp->device->lun;
880 #ifdef NCR_700_DEBUG
881 printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
882 NCR_700_phase[(dsps & 0xf00) >> 8]);
883 scsi_print_msg(hostdata->msgin);
884 printk("\n");
885 #endif
887 switch(hostdata->msgin[0]) {
889 case A_EXTENDED_MSG:
890 resume_offset = process_extended_message(host, hostdata, SCp,
891 dsp, dsps);
892 break;
894 case A_REJECT_MSG:
895 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
896 /* Rejected our sync negotiation attempt */
897 spi_period(SCp->device) = spi_offset(SCp->device) = 0;
898 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
899 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
900 } else if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING)) {
901 /* rejected our first simple tag message */
902 printk(KERN_WARNING "scsi%d (%d:%d) Rejected first tag queue attempt, turning off tag queueing\n", host->host_no, pun, lun);
903 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
904 hostdata->tag_negotiated &= ~(1<<SCp->device->id);
905 SCp->device->tagged_supported = 0;
906 scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
907 } else {
908 printk(KERN_WARNING "scsi%d (%d:%d) Unexpected REJECT Message %s\n",
909 host->host_no, pun, lun,
910 NCR_700_phase[(dsps & 0xf00) >> 8]);
911 /* however, just ignore it */
913 break;
915 case A_PARITY_ERROR_MSG:
916 printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
917 pun, lun);
918 NCR_700_internal_bus_reset(host);
919 break;
920 case A_SIMPLE_TAG_MSG:
921 printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
922 pun, lun, hostdata->msgin[1],
923 NCR_700_phase[(dsps & 0xf00) >> 8]);
924 /* just ignore it */
925 break;
926 default:
927 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
928 host->host_no, pun, lun,
929 NCR_700_phase[(dsps & 0xf00) >> 8]);
931 scsi_print_msg(hostdata->msgin);
932 printk("\n");
933 /* just reject it */
934 hostdata->msgout[0] = A_REJECT_MSG;
935 dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
936 script_patch_16(hostdata->script, MessageCount, 1);
937 /* SendMsgOut returns, so set up the return
938 * address */
939 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
941 break;
943 NCR_700_writel(temp, host, TEMP_REG);
944 /* set us up to receive another message */
945 dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
946 return resume_offset;
949 STATIC __u32
950 process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
951 struct Scsi_Host *host,
952 struct NCR_700_Host_Parameters *hostdata)
954 __u32 resume_offset = 0;
955 __u8 pun = 0xff, lun=0xff;
957 if(SCp != NULL) {
958 pun = SCp->device->id;
959 lun = SCp->device->lun;
962 if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
963 DEBUG((" COMMAND COMPLETE, status=%02x\n",
964 hostdata->status[0]));
965 /* OK, if TCQ still on, we know it works */
966 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
967 /* check for contingent allegiance conditions */
968 if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
969 status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
970 struct NCR_700_command_slot *slot =
971 (struct NCR_700_command_slot *)SCp->host_scribble;
972 if(SCp->cmnd[0] == REQUEST_SENSE) {
973 /* OOPS: bad device, returning another
974 * contingent allegiance condition */
975 printk(KERN_ERR "scsi%d (%d:%d) broken device is looping in contingent allegiance: ignoring\n", host->host_no, pun, lun);
976 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
977 } else {
978 #ifdef NCR_DEBUG
979 scsi_print_command(SCp);
980 printk(" cmd %p has status %d, requesting sense\n",
981 SCp, hostdata->status[0]);
982 #endif
983 /* we can destroy the command here
984 * because the contingent allegiance
985 * condition will cause a retry which
986 * will re-copy the command from the
987 * saved data_cmnd. We also unmap any
988 * data associated with the command
989 * here */
990 NCR_700_unmap(hostdata, SCp, slot);
992 SCp->cmnd[0] = REQUEST_SENSE;
993 SCp->cmnd[1] = (SCp->device->lun & 0x7) << 5;
994 SCp->cmnd[2] = 0;
995 SCp->cmnd[3] = 0;
996 SCp->cmnd[4] = sizeof(SCp->sense_buffer);
997 SCp->cmnd[5] = 0;
998 SCp->cmd_len = 6;
999 /* Here's a quiet hack: the
1000 * REQUEST_SENSE command is six bytes,
1001 * so store a flag indicating that
1002 * this was an internal sense request
1003 * and the original status at the end
1004 * of the command */
1005 SCp->cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1006 SCp->cmnd[7] = hostdata->status[0];
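/* The rebuilt CDB is { REQUEST_SENSE, lun << 5, 0, 0,
 * sizeof(sense_buffer), 0 }; bytes 6 and 7, beyond the 6 byte command
 * length, carry the internal-sense marker and the original status so that
 * NCR_700_scsi_done() can recognise the internally generated sense request
 * and restore the original command and status. */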
1007 SCp->use_sg = 0;
1008 SCp->sc_data_direction = DMA_FROM_DEVICE;
1009 dma_sync_single_for_device(hostdata->dev, slot->pCmd,
1010 SCp->cmd_len, DMA_TO_DEVICE);
1011 SCp->request_bufflen = sizeof(SCp->sense_buffer);
1012 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1013 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
1014 slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1015 slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1016 slot->SG[1].pAddr = 0;
1017 slot->resume_offset = hostdata->pScript;
1018 dma_cache_sync(slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1019 dma_cache_sync(SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1021 /* queue the command for reissue */
1022 slot->state = NCR_700_SLOT_QUEUED;
1023 hostdata->state = NCR_700_HOST_FREE;
1024 hostdata->cmd = NULL;
1026 } else {
1027 // Currently rely on the mid layer evaluation
1028 // of the tag queuing capability
1030 //if(status_byte(hostdata->status[0]) == GOOD &&
1031 // SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1032 // /* Piggy back the tag queueing support
1033 // * on this command */
1034 // dma_sync_single_for_cpu(hostdata->dev,
1035 // slot->dma_handle,
1036 // SCp->request_bufflen,
1037 // DMA_FROM_DEVICE);
1038 // if(((char *)SCp->request_buffer)[7] & 0x02) {
1039 // printk(KERN_INFO "scsi%d: (%d:%d) Enabling Tag Command Queuing\n", host->host_no, pun, lun);
1040 // hostdata->tag_negotiated |= (1<<SCp->device->id);
1041 // NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1042 // } else {
1043 // NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1044 // hostdata->tag_negotiated &= ~(1<<SCp->device->id);
1045 // }
1047 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1049 } else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1050 __u8 i = (dsps & 0xf00) >> 8;
1052 printk(KERN_ERR "scsi%d: (%d:%d), UNEXPECTED PHASE %s (%s)\n",
1053 host->host_no, pun, lun,
1054 NCR_700_phase[i],
1055 sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1056 printk(KERN_ERR " len = %d, cmd =", SCp->cmd_len);
1057 scsi_print_command(SCp);
1059 NCR_700_internal_bus_reset(host);
1060 } else if((dsps & 0xfffff000) == A_FATAL) {
1061 int i = (dsps & 0xfff);
1063 printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1064 host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1065 if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1066 printk(KERN_ERR " msg begins %02x %02x\n",
1067 hostdata->msgin[0], hostdata->msgin[1]);
1069 NCR_700_internal_bus_reset(host);
1070 } else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1071 #ifdef NCR_700_DEBUG
1072 __u8 i = (dsps & 0xf00) >> 8;
1074 printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1075 host->host_no, pun, lun,
1076 i, NCR_700_phase[i]);
1077 #endif
1078 save_for_reselection(hostdata, SCp, dsp);
1080 } else if(dsps == A_RESELECTION_IDENTIFIED) {
1081 __u8 lun;
1082 struct NCR_700_command_slot *slot;
1083 __u8 reselection_id = hostdata->reselection_id;
1084 struct scsi_device *SDp;
1086 lun = hostdata->msgin[0] & 0x1f;
1088 hostdata->reselection_id = 0xff;
1089 DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1090 host->host_no, reselection_id, lun));
1091 /* clear the reselection indicator */
1092 SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1093 if(unlikely(SDp == NULL)) {
1094 printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1095 host->host_no, reselection_id, lun);
1096 BUG();
1098 if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1099 struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
1100 if(unlikely(SCp == NULL)) {
1101 printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1102 host->host_no, reselection_id, lun, hostdata->msgin[2]);
1103 BUG();
1106 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1107 DEBUG(("53c700: %d:%d:%d, reselection is tag %d, slot %p(%d)\n",
1108 host->host_no, SDp->id, SDp->lun,
1109 hostdata->msgin[2], slot, slot->tag));
1110 } else {
1111 struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
1112 if(unlikely(SCp == NULL)) {
1113 printk(KERN_ERR "scsi%d: (%d:%d) no saved request for untagged cmd\n",
1114 host->host_no, reselection_id, lun);
1115 BUG();
1117 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1120 if(slot == NULL) {
1121 printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1122 host->host_no, reselection_id, lun,
1123 hostdata->msgin[0], hostdata->msgin[1],
1124 hostdata->msgin[2]);
1125 } else {
1126 if(hostdata->state != NCR_700_HOST_BUSY)
1127 printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1128 host->host_no);
1129 resume_offset = slot->resume_offset;
1130 hostdata->cmd = slot->cmnd;
1132 /* re-patch for this command */
1133 script_patch_32_abs(hostdata->script, CommandAddress,
1134 slot->pCmd);
1135 script_patch_16(hostdata->script,
1136 CommandCount, slot->cmnd->cmd_len);
1137 script_patch_32_abs(hostdata->script, SGScriptStartAddress,
1138 to32bit(&slot->pSG[0].ins));
1140 /* Note: setting SXFER only works if we're
1141 * still in the MESSAGE phase, so it is vital
1142 * that ACK is still asserted when we process
1143 * the reselection message. The resume offset
1144 * should therefore always clear ACK */
1145 NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1146 host, SXFER_REG);
1147 dma_cache_sync(hostdata->msgin,
1148 MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1149 dma_cache_sync(hostdata->msgout,
1150 MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1151 /* I'm just being paranoid here, the command should
1152 * already have been flushed from the cache */
1153 dma_cache_sync(slot->cmnd->cmnd,
1154 slot->cmnd->cmd_len, DMA_TO_DEVICE);
1159 } else if(dsps == A_RESELECTED_DURING_SELECTION) {
1161 /* This section is full of debugging code because I've
1162 * never managed to reach it. I think what happens is
1163 * that, because the 700 runs with selection
1164  * interrupts enabled the whole time, we take a
1165 * selection interrupt before we manage to get to the
1166 * reselected script interrupt */
1168 __u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1169 struct NCR_700_command_slot *slot;
1171 /* Take out our own ID */
1172 reselection_id &= ~(1<<host->this_id);
1174 /* I've never seen this happen, so keep this as a printk rather
1175 * than a debug */
1176 printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1177 host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1180 /* FIXME: DEBUGGING CODE */
1181 __u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1182 int i;
1184 for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1185 if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1186 && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1187 break;
1189 printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1190 SCp = hostdata->slots[i].cmnd;
1193 if(SCp != NULL) {
1194 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1195 /* change slot from busy to queued to redo command */
1196 slot->state = NCR_700_SLOT_QUEUED;
1198 hostdata->cmd = NULL;
1200 if(reselection_id == 0) {
1201 if(hostdata->reselection_id == 0xff) {
1202 printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1203 return 0;
1204 } else {
1205 printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1206 host->host_no);
1207 reselection_id = hostdata->reselection_id;
1209 } else {
1211 /* convert to real ID */
1212 reselection_id = bitmap_to_number(reselection_id);
1214 hostdata->reselection_id = reselection_id;
1215 /* just in case we have a stale simple tag message, clear it */
1216 hostdata->msgin[1] = 0;
1217 dma_cache_sync(hostdata->msgin,
1218 MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1219 if(hostdata->tag_negotiated & (1<<reselection_id)) {
1220 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1221 } else {
1222 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1224 } else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1225 /* we've just disconnected from the bus, do nothing since
1226 * a return here will re-run the queued command slot
1227 * that may have been interrupted by the initial selection */
1228 DEBUG((" SELECTION COMPLETED\n"));
1229 } else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1230 resume_offset = process_message(host, hostdata, SCp,
1231 dsp, dsps);
1232 } else if((dsps & 0xfffff000) == 0) {
1233 __u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1234 printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1235 host->host_no, pun, lun, NCR_700_condition[i],
1236 NCR_700_phase[j], dsp - hostdata->pScript);
1237 if(SCp != NULL) {
1238 scsi_print_command(SCp);
1240 if(SCp->use_sg) {
1241 for(i = 0; i < SCp->use_sg + 1; i++) {
1242 printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, ((struct scatterlist *)SCp->buffer)[i].length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1246 NCR_700_internal_bus_reset(host);
1247 } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1248 printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1249 host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1250 resume_offset = dsp;
1251 } else {
1252 printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1253 host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1254 NCR_700_internal_bus_reset(host);
1256 return resume_offset;
1259 /* We run the 53c700 with selection interrupts always enabled. This
1260 * means that the chip may be selected as soon as the bus frees. On a
1261 * busy bus, this can be before the scripts engine finishes its
1262 * processing. Therefore, part of the selection processing has to be
1263 * to find out what the scripts engine is doing and complete the
1264 * function if necessary (i.e. process the pending disconnect or save
1265  * the interrupted initial selection) */
1266 STATIC inline __u32
1267 process_selection(struct Scsi_Host *host, __u32 dsp)
1269 __u8 id = 0; /* Squash compiler warning */
1270 int count = 0;
1271 __u32 resume_offset = 0;
1272 struct NCR_700_Host_Parameters *hostdata =
1273 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1274 struct scsi_cmnd *SCp = hostdata->cmd;
1275 __u8 sbcl;
1277 for(count = 0; count < 5; count++) {
1278 id = NCR_700_readb(host, hostdata->chip710 ?
1279 CTEST9_REG : SFBR_REG);
1281 /* Take out our own ID */
1282 id &= ~(1<<host->this_id);
1283 if(id != 0)
1284 break;
1285 udelay(5);
1287 sbcl = NCR_700_readb(host, SBCL_REG);
1288 if((sbcl & SBCL_IO) == 0) {
1289 /* mark as having been selected rather than reselected */
1290 id = 0xff;
1291 } else {
1292 /* convert to real ID */
1293 hostdata->reselection_id = id = bitmap_to_number(id);
1294 DEBUG(("scsi%d: Reselected by %d\n",
1295 host->host_no, id));
1297 if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1298 struct NCR_700_command_slot *slot =
1299 (struct NCR_700_command_slot *)SCp->host_scribble;
1300 DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1302 switch(dsp - hostdata->pScript) {
1303 case Ent_Disconnect1:
1304 case Ent_Disconnect2:
1305 save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1306 break;
1307 case Ent_Disconnect3:
1308 case Ent_Disconnect4:
1309 save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1310 break;
1311 case Ent_Disconnect5:
1312 case Ent_Disconnect6:
1313 save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1314 break;
1315 case Ent_Disconnect7:
1316 case Ent_Disconnect8:
1317 save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1318 break;
1319 case Ent_Finish1:
1320 case Ent_Finish2:
1321 process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1322 break;
1324 default:
1325 slot->state = NCR_700_SLOT_QUEUED;
1326 break;
1329 hostdata->state = NCR_700_HOST_BUSY;
1330 hostdata->cmd = NULL;
1331 /* clear any stale simple tag message */
1332 hostdata->msgin[1] = 0;
1333 dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
1334 DMA_BIDIRECTIONAL);
1336 if(id == 0xff) {
1337 /* Selected as target, Ignore */
1338 resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1339 } else if(hostdata->tag_negotiated & (1<<id)) {
1340 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1341 } else {
1342 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1344 return resume_offset;
1347 static inline void
1348 NCR_700_clear_fifo(struct Scsi_Host *host) {
1349 const struct NCR_700_Host_Parameters *hostdata
1350 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1351 if(hostdata->chip710) {
1352 NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1353 } else {
1354 NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1358 static inline void
1359 NCR_700_flush_fifo(struct Scsi_Host *host) {
1360 const struct NCR_700_Host_Parameters *hostdata
1361 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1362 if(hostdata->chip710) {
1363 NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1364 udelay(10);
1365 NCR_700_writeb(0, host, CTEST8_REG);
1366 } else {
1367 NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1368 udelay(10);
1369 NCR_700_writeb(0, host, DFIFO_REG);
1374 /* The queue lock with interrupts disabled must be held on entry to
1375 * this function */
1376 STATIC int
1377 NCR_700_start_command(struct scsi_cmnd *SCp)
1379 struct NCR_700_command_slot *slot =
1380 (struct NCR_700_command_slot *)SCp->host_scribble;
1381 struct NCR_700_Host_Parameters *hostdata =
1382 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1383 __u16 count = 1; /* for IDENTIFY message */
1385 if(hostdata->state != NCR_700_HOST_FREE) {
1386 /* keep this inside the lock to close the race window where
1387 * the running command finishes on another CPU while we don't
1388 * change the state to queued on this one */
1389 slot->state = NCR_700_SLOT_QUEUED;
1391 DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1392 SCp->device->host->host_no, slot->cmnd, slot));
1393 return 0;
1395 hostdata->state = NCR_700_HOST_BUSY;
1396 hostdata->cmd = SCp;
1397 slot->state = NCR_700_SLOT_BUSY;
1398 /* keep interrupts disabled until we have the command correctly
1399 * set up so we cannot take a selection interrupt */
1401 hostdata->msgout[0] = NCR_700_identify(SCp->cmnd[0] != REQUEST_SENSE,
1402 SCp->device->lun);
1403 /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1404 * if the negotiated transfer parameters still hold, so
1405 * always renegotiate them */
1406 if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE) {
1407 NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1410 /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1411 * If a contingent allegiance condition exists, the device
1412 * will refuse all tags, so send the request sense as untagged
1413 * */
1414 if((hostdata->tag_negotiated & (1<<SCp->device->id))
1415 && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE)) {
1416 count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
1419 if(hostdata->fast &&
1420 NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1421 memcpy(&hostdata->msgout[count], NCR_700_SDTR_msg,
1422 sizeof(NCR_700_SDTR_msg));
1423 hostdata->msgout[count+3] = spi_period(SCp->device);
1424 hostdata->msgout[count+4] = spi_offset(SCp->device);
1425 count += sizeof(NCR_700_SDTR_msg);
1426 NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1429 script_patch_16(hostdata->script, MessageCount, count);
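/* At this point msgout[] holds the IDENTIFY byte, optionally a two byte
 * SIMPLE TAG message from scsi_populate_tag_msg(), and optionally the five
 * byte SDTR message; count is the total length just patched into the
 * script as MessageCount. */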
1432 script_patch_ID(hostdata->script,
1433 Device_ID, 1<<SCp->device->id);
1435 script_patch_32_abs(hostdata->script, CommandAddress,
1436 slot->pCmd);
1437 script_patch_16(hostdata->script, CommandCount, SCp->cmd_len);
1438 /* finally plumb the beginning of the SG list into the script
1439 * */
1440 script_patch_32_abs(hostdata->script, SGScriptStartAddress,
1441 to32bit(&slot->pSG[0].ins));
1442 NCR_700_clear_fifo(SCp->device->host);
1444 if(slot->resume_offset == 0)
1445 slot->resume_offset = hostdata->pScript;
1446 /* now perform all the writebacks and invalidates */
1447 dma_cache_sync(hostdata->msgout, count, DMA_TO_DEVICE);
1448 dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
1449 DMA_FROM_DEVICE);
1450 dma_cache_sync(SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1451 dma_cache_sync(hostdata->status, 1, DMA_FROM_DEVICE);
1453 /* set the synchronous period/offset */
1454 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1455 SCp->device->host, SXFER_REG);
1456 NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1457 NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1459 return 1;
1462 irqreturn_t
1463 NCR_700_intr(int irq, void *dev_id, struct pt_regs *regs)
1465 struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1466 struct NCR_700_Host_Parameters *hostdata =
1467 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1468 __u8 istat;
1469 __u32 resume_offset = 0;
1470 __u8 pun = 0xff, lun = 0xff;
1471 unsigned long flags;
1472 int handled = 0;
1474 /* Use the host lock to serialise access to the 53c700
1475 * hardware. Note: In future, we may need to take the queue
1476 * lock to enter the done routines. When that happens, we
1477 * need to ensure that for this driver, the host lock and the
1478 * queue lock point to the same thing. */
1479 spin_lock_irqsave(host->host_lock, flags);
1480 if((istat = NCR_700_readb(host, ISTAT_REG))
1481 & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1482 __u32 dsps;
1483 __u8 sstat0 = 0, dstat = 0;
1484 __u32 dsp;
1485 struct scsi_cmnd *SCp = hostdata->cmd;
1486 enum NCR_700_Host_State state;
1488 handled = 1;
1489 state = hostdata->state;
1490 SCp = hostdata->cmd;
1492 if(istat & SCSI_INT_PENDING) {
1493 udelay(10);
1495 sstat0 = NCR_700_readb(host, SSTAT0_REG);
1498 if(istat & DMA_INT_PENDING) {
1499 udelay(10);
1501 dstat = NCR_700_readb(host, DSTAT_REG);
1504 dsps = NCR_700_readl(host, DSPS_REG);
1505 dsp = NCR_700_readl(host, DSP_REG);
1507 DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1508 host->host_no, istat, sstat0, dstat,
1509 (dsp - (__u32)(hostdata->pScript))/4,
1510 dsp, dsps));
1512 if(SCp != NULL) {
1513 pun = SCp->device->id;
1514 lun = SCp->device->lun;
1517 if(sstat0 & SCSI_RESET_DETECTED) {
1518 struct scsi_device *SDp;
1519 int i;
1521 hostdata->state = NCR_700_HOST_BUSY;
1523 printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1524 host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1526 scsi_report_bus_reset(host, 0);
1528 /* clear all the negotiated parameters */
1529 __shost_for_each_device(SDp, host)
1530 SDp->hostdata = NULL;
1532 /* clear all the slots and their pending commands */
1533 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1534 struct scsi_cmnd *SCp;
1535 struct NCR_700_command_slot *slot =
1536 &hostdata->slots[i];
1538 if(slot->state == NCR_700_SLOT_FREE)
1539 continue;
1541 SCp = slot->cmnd;
1542 printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1543 slot, SCp);
1544 free_slot(slot, hostdata);
1545 SCp->host_scribble = NULL;
1546 NCR_700_set_depth(SCp->device, 0);
1547 /* NOTE: deadlock potential here: we
1548 * rely on mid-layer guarantees that
1549 * scsi_done won't try to issue the
1550 * command again otherwise we'll
1551 * deadlock on the
1552 * hostdata->state_lock */
1553 SCp->result = DID_RESET << 16;
1554 SCp->scsi_done(SCp);
1556 mdelay(25);
1557 NCR_700_chip_setup(host);
1559 hostdata->state = NCR_700_HOST_FREE;
1560 hostdata->cmd = NULL;
1561 /* signal back if this was an eh induced reset */
1562 if(hostdata->eh_complete != NULL)
1563 complete(hostdata->eh_complete);
1564 goto out_unlock;
1565 } else if(sstat0 & SELECTION_TIMEOUT) {
1566 DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1567 host->host_no, pun, lun));
1568 NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1569 } else if(sstat0 & PHASE_MISMATCH) {
1570 struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1571 (struct NCR_700_command_slot *)SCp->host_scribble;
1573 if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1574 /* It wants to reply to some part of
1575 * our message */
1576 #ifdef NCR_700_DEBUG
1577 __u32 temp = NCR_700_readl(host, TEMP_REG);
1578 int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1579 printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1580 #endif
1581 resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1582 } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1583 dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1584 int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1585 int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1586 int residual = NCR_700_data_residual(host);
1587 int i;
1588 #ifdef NCR_700_DEBUG
1589 __u32 naddr = NCR_700_readl(host, DNAD_REG);
1591 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1592 host->host_no, pun, lun,
1593 SGcount, data_transfer);
1594 scsi_print_command(SCp);
1595 if(residual) {
1596 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1597 host->host_no, pun, lun,
1598 SGcount, data_transfer, residual);
1600 #endif
1601 data_transfer += residual;
1603 if(data_transfer != 0) {
1604 int count;
1605 __u32 pAddr;
1607 SGcount--;
1609 count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1610 DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1611 slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1612 slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1613 pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1614 pAddr += (count - data_transfer);
1615 #ifdef NCR_700_DEBUG
1616 if(pAddr != naddr) {
1617 printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1619 #endif
1620 slot->SG[SGcount].pAddr = bS_to_host(pAddr);
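/* Worked example (sizes for illustration only): if the interrupted move
 * was for 4096 bytes and data_transfer (bytes not yet moved, including the
 * chip residual) is 1024, the instruction is rewritten to move 1024 bytes
 * and pAddr is advanced by 3072, so the transfer resumes exactly where it
 * stopped when the target reconnects. */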
1622 /* set the executed moves to nops */
1623 for(i=0; i<SGcount; i++) {
1624 slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1625 slot->SG[i].pAddr = 0;
1627 dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1628 /* and pretend we disconnected after
1629 * the command phase */
1630 resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1631 /* make sure all the data is flushed */
1632 NCR_700_flush_fifo(host);
1633 } else {
1634 __u8 sbcl = NCR_700_readb(host, SBCL_REG);
1635 printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1636 host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1637 NCR_700_internal_bus_reset(host);
1640 } else if(sstat0 & SCSI_GROSS_ERROR) {
1641 printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1642 host->host_no, pun, lun);
1643 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1644 } else if(sstat0 & PARITY_ERROR) {
1645 printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1646 host->host_no, pun, lun);
1647 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1648 } else if(dstat & SCRIPT_INT_RECEIVED) {
1649 DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1650 host->host_no, pun, lun));
1651 resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1652 } else if(dstat & (ILGL_INST_DETECTED)) {
1653 printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1654 " Please email James.Bottomley@HansenPartnership.com with the details\n",
1655 host->host_no, pun, lun,
1656 dsp, dsp - hostdata->pScript);
1657 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1658 } else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1659 printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1660 host->host_no, pun, lun, dstat);
1661 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1662 }
1665 /* NOTE: selection interrupt processing MUST occur
1666 * after script interrupt processing to correctly cope
1667 * with the case where we process a disconnect and
1668 * then get reselected before we process the
1669 * disconnection */
1670 if(sstat0 & SELECTED) {
1671 /* FIXME: It currently takes at least FOUR
1672 * interrupts to complete a command that
1673 * disconnects: one for the disconnect, one
1674 * for the reselection, one to get the
1675 * reselection data and one to complete the
1676 * command. If we guess the reselected
1677 * command here and prepare it, we only need
1678 * to get a reselection data interrupt if we
1679 * guessed wrongly. Since the interrupt
1680 * overhead is much greater than the command
1681 * setup, this would be an efficient
1682 * optimisation particularly as we probably
1683 * only have one outstanding command on a
1684 * target most of the time */
1686 resume_offset = process_selection(host, dsp);
1687 }
1692 if(resume_offset) {
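/* Restart the SCRIPTS engine: flush any stale bytes out of the
 * FIFOs, then write the resume address to DSP, which sets the chip
 * fetching and executing script instructions from that address. */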
1693 if(hostdata->state != NCR_700_HOST_BUSY) {
1694 printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1695 host->host_no, resume_offset, resume_offset - hostdata->pScript);
1696 hostdata->state = NCR_700_HOST_BUSY;
1697 }
1699 DEBUG(("Attempting to resume at %x\n", resume_offset));
1700 NCR_700_clear_fifo(host);
1701 NCR_700_writel(resume_offset, host, DSP_REG);
1702 }
1703 /* There is probably a technical no-no about this: If we're a
1704 * shared interrupt and we got this interrupt because the
1705 * other device needs servicing not us, we're still going to
1706 * check our queued commands here---of course, there shouldn't
1707 * be any outstanding.... */
1708 if(hostdata->state == NCR_700_HOST_FREE) {
1709 int i;
1711 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1712 /* fairness: always run the queue from the last
1713 * position we left off */
1714 int j = (i + hostdata->saved_slot_position)
1715 % NCR_700_COMMAND_SLOTS_PER_HOST;
1717 if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1718 continue;
1719 if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1720 DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1721 host->host_no, &hostdata->slots[j],
1722 hostdata->slots[j].cmnd));
1723 hostdata->saved_slot_position = j + 1;
1724 }
1726 break;
1727 }
1728 }
1729 out_unlock:
1730 spin_unlock_irqrestore(host->host_lock, flags);
1731 return IRQ_RETVAL(handled);
1732 }
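/* Midlayer entry point, called with the host lock held: claim a
 * command slot, decide whether to issue the command tagged or
 * untagged, build its SCRIPTS scatter-gather list and kick off
 * selection via NCR_700_start_command(). */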
1734 STATIC int
1735 NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1736 {
1737 struct NCR_700_Host_Parameters *hostdata =
1738 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1739 __u32 move_ins;
1740 enum dma_data_direction direction;
1741 struct NCR_700_command_slot *slot;
1743 if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1744 /* We're over our allocation, this should never happen
1745 * since we report the max allocation to the mid layer */
1746 printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1747 return 1;
1748 }
1749 /* check for untagged commands. We cannot have any outstanding
1750 * commands if we accept them. Commands could be untagged because:
1752 * - The tag negotiated bitmap is clear
1753 * - The blk layer sent an untagged command
1754 */
1755 if(NCR_700_get_depth(SCp->device) != 0
1756 && (!(hostdata->tag_negotiated & (1<<SCp->device->id))
1757 || !blk_rq_tagged(SCp->request))) {
1758 DEBUG((KERN_ERR "scsi%d (%d:%d) has non zero depth %d\n",
1759 SCp->device->host->host_no, SCp->device->id, SCp->device->lun,
1760 NCR_700_get_depth(SCp->device)));
1761 return SCSI_MLQUEUE_DEVICE_BUSY;
1762 }
1763 if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1764 DEBUG((KERN_ERR "scsi%d (%d:%d) has max tag depth %d\n",
1765 SCp->device->host->host_no, SCp->device->id, SCp->device->lun,
1766 NCR_700_get_depth(SCp->device)));
1767 return SCSI_MLQUEUE_DEVICE_BUSY;
1768 }
1769 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1771 /* begin the command here */
1772 /* no need to check for NULL: the command_slot_count test above
1773 * ensures a slot is free */
1774 slot = find_empty_slot(hostdata);
1776 slot->cmnd = SCp;
1778 SCp->scsi_done = done;
1779 SCp->host_scribble = (unsigned char *)slot;
1780 SCp->SCp.ptr = NULL;
1781 SCp->SCp.buffer = NULL;
1783 #ifdef NCR_700_DEBUG
1784 printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1785 scsi_print_command(SCp);
1786 #endif
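/* Tagged queueing is tracked per target (one bit per ID in
 * tag_negotiated), not per LUN.  Tags are only turned on once the
 * device has reported tagged_supported, and are turned off again
 * below if an untagged command subsequently arrives for the target. */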
1787 if(SCp->device->tagged_supported && !SCp->device->simple_tags
1788 && (hostdata->tag_negotiated &(1<<SCp->device->id)) == 0
1789 && NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING)) {
1790 /* upper layer has indicated tags are supported. We don't
1791 * necessarily believe it yet.
1793 * NOTE: There is a danger here: the mid layer supports
1794 * tag queuing per LUN. We only support it per PUN because
1795 * of potential reselection issues */
1796 scsi_activate_tcq(SCp->device, NCR_700_DEFAULT_TAGS);
1797 }
1799 if(blk_rq_tagged(SCp->request)
1800 && (hostdata->tag_negotiated &(1<<SCp->device->id)) == 0) {
1801 printk(KERN_INFO "scsi%d: (%d:%d) Enabling Tag Command Queuing\n", SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
1802 hostdata->tag_negotiated |= (1<<SCp->device->id);
1803 NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1804 }
1806 /* here we may have to process an untagged command. The gate
1807 * above ensures that this will be the only one outstanding,
1808 * so clear the tag negotiated bit.
1810 * FIXME: This will royally screw up on multiple LUN devices
1811 * */
1812 if(!blk_rq_tagged(SCp->request)
1813 && (hostdata->tag_negotiated &(1<<SCp->device->id))) {
1814 printk(KERN_INFO "scsi%d: (%d:%d) Disabling Tag Command Queuing\n", SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
1815 hostdata->tag_negotiated &= ~(1<<SCp->device->id);
1816 }
1818 if((hostdata->tag_negotiated &(1<<SCp->device->id))) {
1819 slot->tag = SCp->request->tag;
1820 DEBUG(("53c700 %d:%d:%d, sending out tag %d, slot %p\n",
1821 SCp->device->host->host_no, SCp->device->id, SCp->device->lun, slot->tag,
1822 slot));
1823 } else {
1824 slot->tag = SCSI_NO_TAG;
1825 /* must populate current_cmnd for scsi_find_tag to work */
1826 SCp->device->current_cmnd = SCp;
1827 }
1828 /* sanity check: some of the commands generated by the mid-layer
1829 * have an eccentric idea of their sc_data_direction */
1830 if(!SCp->use_sg && !SCp->request_bufflen
1831 && SCp->sc_data_direction != DMA_NONE) {
1832 #ifdef NCR_700_DEBUG
1833 printk("53c700: Command");
1834 scsi_print_command(SCp);
1835 printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1836 #endif
1837 SCp->sc_data_direction = DMA_NONE;
1838 }
1840 switch (SCp->cmnd[0]) {
1841 case REQUEST_SENSE:
1842 /* clear the internal sense magic */
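/* (Byte 6 of the CDB is used as an internal "sense magic" marker on
 * the REQUEST SENSE commands the driver issues itself for autosense;
 * one arriving from the midlayer must not carry that marker, hence
 * the clear below.) */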
1843 SCp->cmnd[6] = 0;
1844 /* fall through */
1845 default:
1846 /* OK, get it from the command */
1847 switch(SCp->sc_data_direction) {
1848 case DMA_BIDIRECTIONAL:
1849 default:
1850 printk(KERN_ERR "53c700: Unknown command for data direction ");
1851 scsi_print_command(SCp);
1853 move_ins = 0;
1854 break;
1855 case DMA_NONE:
1856 move_ins = 0;
1857 break;
1858 case DMA_FROM_DEVICE:
1859 move_ins = SCRIPT_MOVE_DATA_IN;
1860 break;
1861 case DMA_TO_DEVICE:
1862 move_ins = SCRIPT_MOVE_DATA_OUT;
1863 break;
1864 }
1865 }
1867 /* now build the scatter gather list */
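/* Each mapped segment becomes one SCRIPTS MOVE instruction in the
 * slot (opcode | byte count, plus the segment's bus address), and the
 * list is terminated with a SCRIPT_RETURN entry.  The dma_cache_sync()
 * below makes the table visible to the chip before the script runs. */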
1868 direction = SCp->sc_data_direction;
1869 if(move_ins != 0) {
1870 int i;
1871 int sg_count;
1872 dma_addr_t vPtr = 0;
1873 __u32 count = 0;
1875 if(SCp->use_sg) {
1876 sg_count = dma_map_sg(hostdata->dev, SCp->buffer,
1877 SCp->use_sg, direction);
1878 } else {
1879 vPtr = dma_map_single(hostdata->dev,
1880 SCp->request_buffer,
1881 SCp->request_bufflen,
1882 direction);
1883 count = SCp->request_bufflen;
1884 slot->dma_handle = vPtr;
1885 sg_count = 1;
1886 }
1889 for(i = 0; i < sg_count; i++) {
1891 if(SCp->use_sg) {
1892 struct scatterlist *sg = SCp->buffer;
1894 vPtr = sg_dma_address(&sg[i]);
1895 count = sg_dma_len(&sg[i]);
1896 }
1898 slot->SG[i].ins = bS_to_host(move_ins | count);
1899 DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1900 i, count, slot->SG[i].ins, (unsigned long)vPtr));
1901 slot->SG[i].pAddr = bS_to_host(vPtr);
1902 }
1903 slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1904 slot->SG[i].pAddr = 0;
1905 dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1906 DEBUG((" SETTING %08lx to %x\n",
1907 (&slot->pSG[i].ins),
1908 slot->SG[i].ins));
1909 }
1910 slot->resume_offset = 0;
1911 slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1912 sizeof(SCp->cmnd), DMA_TO_DEVICE);
1913 NCR_700_start_command(SCp);
1914 return 0;
1915 }
1917 STATIC int
1918 NCR_700_abort(struct scsi_cmnd * SCp)
1919 {
1920 struct NCR_700_command_slot *slot;
1922 printk(KERN_INFO "scsi%d (%d:%d) New error handler wants to abort command\n\t",
1923 SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
1924 scsi_print_command(SCp);
1926 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1928 if(slot == NULL)
1929 /* no outstanding command to abort */
1930 return SUCCESS;
1931 if(SCp->cmnd[0] == TEST_UNIT_READY) {
1932 /* FIXME: This is because of a problem in the new
1933 * error handler. When it is in error recovery, it
1934 * will send a TUR to a device it thinks may still be
1935 * showing a problem. If the TUR isn't responded to,
1936 * it will abort it and mark the device off line.
1937 * Unfortunately, it does no other error recovery, so
1938 * this would leave us with an outstanding command
1939 * occupying a slot. Rather than allow this to
1940 * happen, we issue a bus reset to force all
1941 * outstanding commands to terminate here. */
1942 NCR_700_internal_bus_reset(SCp->device->host);
1943 /* still drop through and return failed */
1944 }
1945 return FAILED;
1946 }
1949 STATIC int
1950 NCR_700_bus_reset(struct scsi_cmnd * SCp)
1951 {
1952 DECLARE_COMPLETION(complete);
1953 struct NCR_700_Host_Parameters *hostdata =
1954 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1956 printk(KERN_INFO "scsi%d (%d:%d) New error handler wants BUS reset, cmd %p\n\t",
1957 SCp->device->host->host_no, SCp->device->id, SCp->device->lun, SCp);
1958 scsi_print_command(SCp);
1959 /* In theory, eh_complete should always be null because the
1960 * eh is single threaded, but just in case we're handling a
1961 * reset via sg or something */
1962 while(hostdata->eh_complete != NULL) {
1963 spin_unlock_irq(SCp->device->host->host_lock);
1964 schedule_timeout(HZ/10);
1965 spin_lock_irq(SCp->device->host->host_lock);
1966 }
1967 hostdata->eh_complete = &complete;
1968 NCR_700_internal_bus_reset(SCp->device->host);
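/* The reset-detected path in the interrupt handler completes
 * eh_complete once the chip has been reinitialised, so drop the host
 * lock while sleeping on it. */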
1969 spin_unlock_irq(SCp->device->host->host_lock);
1970 wait_for_completion(&complete);
1971 spin_lock_irq(SCp->device->host->host_lock);
1972 hostdata->eh_complete = NULL;
1973 /* Revalidate the transport parameters of the failing device */
1974 if(hostdata->fast)
1975 spi_schedule_dv_device(SCp->device);
1976 return SUCCESS;
1977 }
1979 STATIC int
1980 NCR_700_dev_reset(struct scsi_cmnd * SCp)
1981 {
1982 printk(KERN_INFO "scsi%d (%d:%d) New error handler wants device reset\n\t",
1983 SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
1984 scsi_print_command(SCp);
1986 return FAILED;
1987 }
1989 STATIC int
1990 NCR_700_host_reset(struct scsi_cmnd * SCp)
1991 {
1992 printk(KERN_INFO "scsi%d (%d:%d) New error handler wants HOST reset\n\t",
1993 SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
1994 scsi_print_command(SCp);
1996 NCR_700_internal_bus_reset(SCp->device->host);
1997 NCR_700_chip_reset(SCp->device->host);
1998 return SUCCESS;
1999 }
2001 STATIC void
2002 NCR_700_set_period(struct scsi_device *SDp, int period)
2003 {
2004 struct NCR_700_Host_Parameters *hostdata =
2005 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2007 if(!hostdata->fast)
2008 return;
2010 if(period < hostdata->min_period)
2011 period = hostdata->min_period;
2013 spi_period(SDp) = period;
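/* Clearing the negotiation flags forces a fresh SDTR exchange with
 * the new period on the next command sent to this device. */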
2014 NCR_700_clear_flag(SDp, NCR_700_DEV_NEGOTIATED_SYNC);
2015 NCR_700_clear_flag(SDp, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2016 NCR_700_set_flag(SDp, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
2017 }
2019 STATIC void
2020 NCR_700_set_offset(struct scsi_device *SDp, int offset)
2021 {
2022 struct NCR_700_Host_Parameters *hostdata =
2023 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2024 int max_offset = hostdata->chip710
2025 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2027 if(!hostdata->fast)
2028 return;
2030 if(offset > max_offset)
2031 offset = max_offset;
2033 /* if we're currently async, make sure the period is reasonable */
2034 if(spi_offset(SDp) == 0 && (spi_period(SDp) < hostdata->min_period ||
2035 spi_period(SDp) > 0xff))
2036 spi_period(SDp) = hostdata->min_period;
2038 spi_offset(SDp) = offset;
2039 NCR_700_clear_flag(SDp, NCR_700_DEV_NEGOTIATED_SYNC);
2040 NCR_700_clear_flag(SDp, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2041 NCR_700_set_flag(SDp, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
2042 }
2046 STATIC int
2047 NCR_700_slave_configure(struct scsi_device *SDp)
2048 {
2049 struct NCR_700_Host_Parameters *hostdata =
2050 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2052 /* to do here: allocate memory; build a queue_full list */
2053 if(SDp->tagged_supported) {
2054 /* do TCQ stuff here */
2055 } else {
2056 /* initialise to default depth */
2057 scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
2058 }
2059 if(hostdata->fast) {
2060 /* Find the correct offset and period via domain validation */
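/* (Domain validation is driven by the SPI transport class and calls
 * back into NCR_700_set_period/NCR_700_set_offset above to settle on
 * working values.) */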
2061 spi_dv_device(SDp);
2062 } else {
2063 spi_offset(SDp) = 0;
2064 spi_period(SDp) = 0;
2065 }
2066 return 0;
2067 }
2069 STATIC void
2070 NCR_700_slave_destroy(struct scsi_device *SDp)
2071 {
2072 /* to do here: deallocate memory */
2073 }
2075 static ssize_t
2076 NCR_700_store_queue_depth(struct device *dev, const char *buf, size_t count)
2077 {
2078 int depth;
2080 struct scsi_device *SDp = to_scsi_device(dev);
2081 depth = simple_strtoul(buf, NULL, 0);
2082 if(depth > NCR_700_MAX_TAGS)
2083 return -EINVAL;
2084 scsi_adjust_queue_depth(SDp, MSG_ORDERED_TAG, depth);
2086 return count;
2087 }
2089 static ssize_t
2090 NCR_700_show_active_tags(struct device *dev, char *buf)
2091 {
2092 struct scsi_device *SDp = to_scsi_device(dev);
2094 return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2095 }
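/* These attributes appear in the SCSI device's sysfs directory (they
 * are intended to be hooked up via the host template's sdev_attrs).
 * A sketch of their use from userspace, assuming a device at 0:0:1:0:
 *
 *	echo 16 > /sys/bus/scsi/devices/0:0:1:0/queue_depth
 *	cat /sys/bus/scsi/devices/0:0:1:0/active_tags
 *
 * Writes larger than NCR_700_MAX_TAGS are rejected with -EINVAL. */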
2097 static struct device_attribute NCR_700_queue_depth_attr = {
2098 .attr = {
2099 .name = "queue_depth",
2100 .mode = S_IWUSR,
2101 },
2102 .store = NCR_700_store_queue_depth,
2103 };
2105 static struct device_attribute NCR_700_active_tags_attr = {
2106 .attr = {
2107 .name = "active_tags",
2108 .mode = S_IRUGO,
2109 },
2110 .show = NCR_700_show_active_tags,
2111 };
2113 STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2114 &NCR_700_queue_depth_attr,
2115 &NCR_700_active_tags_attr,
2116 NULL,
2117 };
2119 EXPORT_SYMBOL(NCR_700_detect);
2120 EXPORT_SYMBOL(NCR_700_release);
2121 EXPORT_SYMBOL(NCR_700_intr);
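/* Only synchronous period and offset are exposed to the SPI
 * transport class; these are narrow chips with no wide or DT
 * transfer modes to report. */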
2123 static struct spi_function_template NCR_700_transport_functions = {
2124 .set_period = NCR_700_set_period,
2125 .show_period = 1,
2126 .set_offset = NCR_700_set_offset,
2127 .show_offset = 1,
2128 };
2130 static int __init NCR_700_init(void)
2131 {
2132 NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2133 if(!NCR_700_transport_template)
2134 return -ENODEV;
2135 return 0;
2136 }
2138 static void __exit NCR_700_exit(void)
2139 {
2140 spi_release_transport(NCR_700_transport_template);
2141 }
2143 module_init(NCR_700_init);
2144 module_exit(NCR_700_exit);