drivers/scsi/53c700.c
1 /* -*- mode: c; c-basic-offset: 8 -*- */
3 /* NCR (or Symbios) 53c700 and 53c700-66 Driver
5 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
6 **-----------------------------------------------------------------------------
7 **
8 ** This program is free software; you can redistribute it and/or modify
9 ** it under the terms of the GNU General Public License as published by
10 ** the Free Software Foundation; either version 2 of the License, or
11 ** (at your option) any later version.
13 ** This program is distributed in the hope that it will be useful,
14 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 ** GNU General Public License for more details.
18 ** You should have received a copy of the GNU General Public License
19 ** along with this program; if not, write to the Free Software
20 ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 **-----------------------------------------------------------------------------
25 /* Notes:
27 * This driver is designed exclusively for these chips (virtually the
28 * earliest of the scripts engine chips). They need their own drivers
29 * because they are missing so many of the scripts and snazzy register
30 * features of their elder brothers (the 710, 720 and 770).
32 * The 700 is the lowliest of the line; it can only do async SCSI.
33 * The 700-66 can at least do synchronous SCSI up to 10MHz.
35 * The 700 chip has no host bus interface logic of its own. However,
36 * it is usually mapped to a location with well defined register
37 * offsets. Therefore, if you can determine the base address and the
38 * irq your board incorporating this chip uses, you can probably use
39 * this driver to run it (although you'll probably have to write a
40 * minimal wrapper for the purpose---see the NCR_D700 driver for
41 * details about how to do this).
44 * TODO List:
46 * 1. Better statistics in the proc fs
48 * 2. Implement message queue (queues SCSI messages like commands) and make
49 * the abort and device reset functions use them.
50 * */
52 /* CHANGELOG
54 * Version 2.8
56 * Fixed bad bug affecting tag starvation processing (previously the
57 * driver would hang the system if too many tags starved). Also fixed
58 * a bad bug having to do with 10 byte command processing and REQUEST
59 * SENSE (the command would loop forever getting a transfer length
60 * mismatch in the CMD phase).
62 * Version 2.7
64 * Fixed scripts problem which caused certain devices (notably CDRWs)
65 * to hang on initial INQUIRY. Updated NCR_700_readl/writel to use
66 * __raw_readl/writel for parisc compatibility (Thomas
67 * Bogendoerfer). Added missing SCp->request_bufflen initialisation
68 * for sense requests (Ryan Bradetich).
70 * Version 2.6
72 * Following test of the 64 bit parisc kernel by Richard Hirst,
73 * several problems have now been corrected. Also adds support for
74 * consistent memory allocation.
76 * Version 2.5
78 * More compatibility changes for 710 (now actually works). Enhanced
79 * support for odd clock speeds which constrain SDTR negotiations.
80 * Correct cacheline separation for scsi messages and status for
81 * incoherent architectures. Use of the pci mapping functions on
82 * buffers to begin support for 64 bit drivers.
84 * Version 2.4
86 * Added support for the 53c710 chip (in 53c700 emulation mode only---no
87 * special 53c710 instructions or registers are used).
89 * Version 2.3
91 * More endianness/cache coherency changes.
93 * Better bad device handling (handles devices lying about tag
94 * queueing support and devices which fail to provide sense data on
95 * contingent allegiance conditions)
97 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
98 * debugging this driver on the parisc architecture and suggesting
99 * many improvements and bug fixes.
101 * Thanks also go to Linuxcare Inc. for providing several PARISC
102 * machines for me to debug the driver on.
104 * Version 2.2
106 * Made the driver mem or io mapped; added endian invariance; added
107 * dma cache flushing operations for architectures which need it;
108 * added support for more varied clocking speeds.
110 * Version 2.1
112 * Initial modularisation from the D700. See NCR_D700.c for the rest of
113 * the changelog.
114 * */
115 #define NCR_700_VERSION "2.8"
117 #include <linux/kernel.h>
118 #include <linux/types.h>
119 #include <linux/string.h>
120 #include <linux/ioport.h>
121 #include <linux/delay.h>
122 #include <linux/spinlock.h>
123 #include <linux/completion.h>
124 #include <linux/sched.h>
125 #include <linux/init.h>
126 #include <linux/proc_fs.h>
127 #include <linux/blkdev.h>
128 #include <linux/module.h>
129 #include <linux/interrupt.h>
130 #include <linux/device.h>
131 #include <asm/dma.h>
132 #include <asm/system.h>
133 #include <asm/io.h>
134 #include <asm/pgtable.h>
135 #include <asm/byteorder.h>
137 #include <scsi/scsi.h>
138 #include <scsi/scsi_cmnd.h>
139 #include <scsi/scsi_dbg.h>
140 #include <scsi/scsi_eh.h>
141 #include <scsi/scsi_host.h>
142 #include <scsi/scsi_tcq.h>
143 #include <scsi/scsi_transport.h>
144 #include <scsi/scsi_transport_spi.h>
146 #include "53c700.h"
148 /* NOTE: For 64 bit drivers there are points in the code where we use
149 * a non dereferenceable pointer to point to a structure in dma-able
150 * memory (which is 32 bits) so that we can use all of the structure
151 * operations but take the address at the end. This macro allows us
152 * to truncate the 64 bit pointer down to 32 bits without the compiler
153 * complaining */
154 #define to32bit(x) ((__u32)((unsigned long)(x)))
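/* Illustrative usage (added note, not in the original source): on a 64 bit
 * kernel the per-slot SG list lives in the 32 bit dma-able block, so the
 * scripts are patched with something like
 *
 *	script_patch_32_abs(hostdata->script, SGScriptStartAddress,
 *			    to32bit(&slot->pSG[0].ins));
 *
 * where &slot->pSG[0].ins is a 64 bit pointer that is only ever used as a
 * 32 bit bus address and is never dereferenced by the CPU. */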
156 #ifdef NCR_700_DEBUG
157 #define STATIC
158 #else
159 #define STATIC static
160 #endif
162 MODULE_AUTHOR("James Bottomley");
163 MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
164 MODULE_LICENSE("GPL");
166 /* This is the script */
167 #include "53c700_d.h"
170 STATIC int NCR_700_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
171 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
172 STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
173 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
174 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
175 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
176 STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
177 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
178 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
179 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
180 static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
182 STATIC struct device_attribute *NCR_700_dev_attrs[];
184 STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
186 static char *NCR_700_phase[] = {
188 "after selection",
189 "before command phase",
190 "after command phase",
191 "after status phase",
192 "after data in phase",
193 "after data out phase",
194 "during data phase",
197 static char *NCR_700_condition[] = {
199 "NOT MSG_OUT",
200 "UNEXPECTED PHASE",
201 "NOT MSG_IN",
202 "UNEXPECTED MSG",
203 "MSG_IN",
204 "SDTR_MSG RECEIVED",
205 "REJECT_MSG RECEIVED",
206 "DISCONNECT_MSG RECEIVED",
207 "MSG_OUT",
208 "DATA_IN",
212 static char *NCR_700_fatal_messages[] = {
213 "unexpected message after reselection",
214 "still MSG_OUT after message injection",
215 "not MSG_IN after selection",
216 "Illegal message length received",
219 static char *NCR_700_SBCL_bits[] = {
220 "IO ",
221 "CD ",
222 "MSG ",
223 "ATN ",
224 "SEL ",
225 "BSY ",
226 "ACK ",
227 "REQ ",
230 static char *NCR_700_SBCL_to_phase[] = {
231 "DATA_OUT",
232 "DATA_IN",
233 "CMD_OUT",
234 "STATE",
235 "ILLEGAL PHASE",
236 "ILLEGAL PHASE",
237 "MSG OUT",
238 "MSG IN",
241 /* This translates the SDTR message offset and period to a value
242 * which can be loaded into the SXFER_REG.
244 * NOTE: According to SCSI-2, the true transfer period (in ns) is
245 * actually four times this period value */
246 static inline __u8
247 NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
248 __u8 offset, __u8 period)
250 int XFERP;
252 __u8 min_xferp = (hostdata->chip710
253 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
254 __u8 max_offset = (hostdata->chip710
255 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
257 if(offset == 0)
258 return 0;
260 if(period < hostdata->min_period) {
261 printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, NCR_700_MIN_PERIOD*4);
262 period = hostdata->min_period;
264 XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
265 if(offset > max_offset) {
266 printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
267 offset, max_offset);
268 offset = max_offset;
270 if(XFERP < min_xferp) {
271 printk(KERN_WARNING "53c700: XFERP %d is less than minium, setting to %d\n",
272 XFERP, min_xferp);
273 XFERP = min_xferp;
275 return (offset & 0x0f) | (XFERP & 0x07)<<4;
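/* Worked example (illustrative, not part of the original source): with a
 * 40MHz sync clock, an SDTR period of 25 (4*25 = 100ns, i.e. 10MHz) and
 * offset 8 gives XFERP = (25*4*40)/1000 - 4 = 0; if that is below
 * min_xferp it is raised to min_xferp, and the value returned for
 * SXFER_REG is (8 & 0x0f) | (XFERP & 0x07)<<4. */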
278 static inline __u8
279 NCR_700_get_SXFER(struct scsi_device *SDp)
281 struct NCR_700_Host_Parameters *hostdata =
282 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
284 return NCR_700_offset_period_to_sxfer(hostdata,
285 spi_offset(SDp->sdev_target),
286 spi_period(SDp->sdev_target));
289 struct Scsi_Host *
290 NCR_700_detect(struct scsi_host_template *tpnt,
291 struct NCR_700_Host_Parameters *hostdata, struct device *dev)
293 dma_addr_t pScript, pSlots;
294 __u8 *memory;
295 __u32 *script;
296 struct Scsi_Host *host;
297 static int banner = 0;
298 int j;
300 if(tpnt->sdev_attrs == NULL)
301 tpnt->sdev_attrs = NCR_700_dev_attrs;
303 memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
304 &pScript, GFP_KERNEL);
305 if(memory == NULL) {
306 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
307 return NULL;
310 script = (__u32 *)memory;
311 hostdata->msgin = memory + MSGIN_OFFSET;
312 hostdata->msgout = memory + MSGOUT_OFFSET;
313 hostdata->status = memory + STATUS_OFFSET;
314 /* all of these offsets are L1_CACHE_BYTES separated. It is fatal
315 * if this isn't sufficient separation to avoid dma flushing issues */
316 BUG_ON(!dma_is_consistent(pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
317 hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
318 hostdata->dev = dev;
320 pSlots = pScript + SLOTS_OFFSET;
322 /* Fill in the missing routines from the host template */
323 tpnt->queuecommand = NCR_700_queuecommand;
324 tpnt->eh_abort_handler = NCR_700_abort;
325 tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
326 tpnt->eh_host_reset_handler = NCR_700_host_reset;
327 tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
328 tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
329 tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
330 tpnt->use_clustering = ENABLE_CLUSTERING;
331 tpnt->slave_configure = NCR_700_slave_configure;
332 tpnt->slave_destroy = NCR_700_slave_destroy;
333 tpnt->slave_alloc = NCR_700_slave_alloc;
334 tpnt->change_queue_depth = NCR_700_change_queue_depth;
335 tpnt->change_queue_type = NCR_700_change_queue_type;
337 if(tpnt->name == NULL)
338 tpnt->name = "53c700";
339 if(tpnt->proc_name == NULL)
340 tpnt->proc_name = "53c700";
342 host = scsi_host_alloc(tpnt, 4);
343 if (!host)
344 return NULL;
345 memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
346 * NCR_700_COMMAND_SLOTS_PER_HOST);
347 for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
348 dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
349 - (unsigned long)&hostdata->slots[0].SG[0]);
350 hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
351 if(j == 0)
352 hostdata->free_list = &hostdata->slots[j];
353 else
354 hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
355 hostdata->slots[j].state = NCR_700_SLOT_FREE;
358 for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
359 script[j] = bS_to_host(SCRIPT[j]);
361 /* adjust all labels to be bus physical */
362 for (j = 0; j < PATCHES; j++)
363 script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
364 /* now patch up fixed addresses. */
365 script_patch_32(script, MessageLocation,
366 pScript + MSGOUT_OFFSET);
367 script_patch_32(script, StatusAddress,
368 pScript + STATUS_OFFSET);
369 script_patch_32(script, ReceiveMsgAddress,
370 pScript + MSGIN_OFFSET);
372 hostdata->script = script;
373 hostdata->pScript = pScript;
374 dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
375 hostdata->state = NCR_700_HOST_FREE;
376 hostdata->cmd = NULL;
377 host->max_id = 8;
378 host->max_lun = NCR_700_MAX_LUNS;
379 BUG_ON(NCR_700_transport_template == NULL);
380 host->transportt = NCR_700_transport_template;
381 host->unique_id = (unsigned long)hostdata->base;
382 hostdata->eh_complete = NULL;
383 host->hostdata[0] = (unsigned long)hostdata;
384 /* kick the chip */
385 NCR_700_writeb(0xff, host, CTEST9_REG);
386 if (hostdata->chip710)
387 hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
388 else
389 hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
390 hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
391 if (banner == 0) {
392 printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
393 banner = 1;
395 printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
396 hostdata->chip710 ? "53c710" :
397 (hostdata->fast ? "53c700-66" : "53c700"),
398 hostdata->rev, hostdata->differential ?
399 "(Differential)" : "");
400 /* reset the chip */
401 NCR_700_chip_reset(host);
403 if (scsi_add_host(host, dev)) {
404 dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
405 scsi_host_put(host);
406 return NULL;
409 spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
410 SPI_SIGNAL_SE;
412 return host;
416 NCR_700_release(struct Scsi_Host *host)
418 struct NCR_700_Host_Parameters *hostdata =
419 (struct NCR_700_Host_Parameters *)host->hostdata[0];
421 dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
422 hostdata->script, hostdata->pScript);
423 return 1;
426 static inline __u8
427 NCR_700_identify(int can_disconnect, __u8 lun)
429 return IDENTIFY_BASE |
430 ((can_disconnect) ? 0x40 : 0) |
431 (lun & NCR_700_LUN_MASK);
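/* Example (assuming the standard IDENTIFY_BASE of 0x80): a disconnect
 * privileged IDENTIFY for LUN 2 is 0x80 | 0x40 | 2 = 0xc2; with
 * can_disconnect == 0 and LUN 0 it is plain 0x80. */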
435 * Function : static int data_residual (Scsi_Host *host)
437 * Purpose : return residual data count of what's in the chip. If you
438 * really want to know what this function is doing, it's almost a
439 * direct transcription of the algorithm described in the 53c710
440 * guide, except that the DBC and DFIFO registers are only 6 bits
441 * wide on a 53c700.
443 * Inputs : host - SCSI host */
444 static inline int
445 NCR_700_data_residual (struct Scsi_Host *host) {
446 struct NCR_700_Host_Parameters *hostdata =
447 (struct NCR_700_Host_Parameters *)host->hostdata[0];
448 int count, synchronous = 0;
449 unsigned int ddir;
451 if(hostdata->chip710) {
452 count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
453 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
454 } else {
455 count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
456 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
459 if(hostdata->fast)
460 synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
462 /* get the data direction */
463 ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
465 if (ddir) {
466 /* Receive */
467 if (synchronous)
468 count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
469 else
470 if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
471 ++count;
472 } else {
473 /* Send */
474 __u8 sstat = NCR_700_readb(host, SSTAT1_REG);
475 if (sstat & SODL_REG_FULL)
476 ++count;
477 if (synchronous && (sstat & SODR_REG_FULL))
478 ++count;
480 #ifdef NCR_700_DEBUG
481 if(count)
482 printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
483 #endif
484 return count;
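/* Illustrative example (made-up register values): on a 53c700 with
 * DFIFO = 0x24 and DBC = 0x21 the fifo difference is
 * (0x24 - 0x21) & 0x3f = 3 bytes; if this was a send and SSTAT1 shows
 * SODL full, one more byte is still latched, so the residual is 4. */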
487 /* print out the SCSI wires and corresponding phase from the SBCL register
488 * in the chip */
489 static inline char *
490 sbcl_to_string(__u8 sbcl)
492 int i;
493 static char ret[256];
495 ret[0]='\0';
496 for(i=0; i<8; i++) {
497 if((1<<i) & sbcl)
498 strcat(ret, NCR_700_SBCL_bits[i]);
500 strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
501 return ret;
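/* Example (illustrative value): sbcl = 0x83 has the IO, CD and REQ wires
 * asserted and sbcl & 0x07 = 3 indexes "STATE", so this returns
 * "IO CD REQ STATE", i.e. the status phase. */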
504 static inline __u8
505 bitmap_to_number(__u8 bitmap)
507 __u8 i;
509 for(i=0; i<8 && !(bitmap &(1<<i)); i++)
511 return i;
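/* Example: a reselection bitmap of 0x20 (only bit 5 set) converts to
 * SCSI ID 5; an all-zero bitmap falls through the loop and returns 8. */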
514 /* Pull a slot off the free list */
515 STATIC struct NCR_700_command_slot *
516 find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
518 struct NCR_700_command_slot *slot = hostdata->free_list;
520 if(slot == NULL) {
521 /* sanity check */
522 if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
523 printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
524 return NULL;
527 if(slot->state != NCR_700_SLOT_FREE)
528 /* should panic! */
529 printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
532 hostdata->free_list = slot->ITL_forw;
533 slot->ITL_forw = NULL;
536 /* NOTE: set the state to busy here, not queued, since this
537 * indicates the slot is in use and cannot be run by the IRQ
538 * finish routine. If we cannot queue the command when it
539 * is properly built, we then change to NCR_700_SLOT_QUEUED */
540 slot->state = NCR_700_SLOT_BUSY;
541 slot->flags = 0;
542 hostdata->command_slot_count++;
544 return slot;
547 STATIC void
548 free_slot(struct NCR_700_command_slot *slot,
549 struct NCR_700_Host_Parameters *hostdata)
551 if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
552 printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
554 if(slot->state == NCR_700_SLOT_FREE) {
555 printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
558 slot->resume_offset = 0;
559 slot->cmnd = NULL;
560 slot->state = NCR_700_SLOT_FREE;
561 slot->ITL_forw = hostdata->free_list;
562 hostdata->free_list = slot;
563 hostdata->command_slot_count--;
567 /* This routine really does very little. The command is indexed on
568 the ITL and (if tagged) the ITLQ lists in _queuecommand */
569 STATIC void
570 save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
571 struct scsi_cmnd *SCp, __u32 dsp)
573 /* It's just possible that this gets executed twice */
574 if(SCp != NULL) {
575 struct NCR_700_command_slot *slot =
576 (struct NCR_700_command_slot *)SCp->host_scribble;
578 slot->resume_offset = dsp;
580 hostdata->state = NCR_700_HOST_FREE;
581 hostdata->cmd = NULL;
584 STATIC inline void
585 NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
586 struct NCR_700_command_slot *slot)
588 if(SCp->sc_data_direction != DMA_NONE &&
589 SCp->sc_data_direction != DMA_BIDIRECTIONAL) {
590 if(SCp->use_sg) {
591 dma_unmap_sg(hostdata->dev, SCp->request_buffer,
592 SCp->use_sg, SCp->sc_data_direction);
593 } else {
594 dma_unmap_single(hostdata->dev, slot->dma_handle,
595 SCp->request_bufflen,
596 SCp->sc_data_direction);
601 STATIC inline void
602 NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
603 struct scsi_cmnd *SCp, int result)
605 hostdata->state = NCR_700_HOST_FREE;
606 hostdata->cmd = NULL;
608 if(SCp != NULL) {
609 struct NCR_700_command_slot *slot =
610 (struct NCR_700_command_slot *)SCp->host_scribble;
612 dma_unmap_single(hostdata->dev, slot->pCmd,
613 sizeof(SCp->cmnd), DMA_TO_DEVICE);
614 if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
615 char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
616 #ifdef NCR_700_DEBUG
617 printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
618 SCp, SCp->cmnd[7], result);
619 scsi_print_sense("53c700", SCp);
621 #endif
622 dma_unmap_single(hostdata->dev, slot->dma_handle, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
623 /* restore the old result if the request sense was
624 * successful */
625 if (result == 0)
626 result = cmnd[7];
627 /* restore the original length */
628 SCp->cmd_len = cmnd[8];
629 } else
630 NCR_700_unmap(hostdata, SCp, slot);
632 free_slot(slot, hostdata);
633 #ifdef NCR_700_DEBUG
634 if(NCR_700_get_depth(SCp->device) == 0 ||
635 NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
636 printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
637 NCR_700_get_depth(SCp->device));
638 #endif /* NCR_700_DEBUG */
639 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
641 SCp->host_scribble = NULL;
642 SCp->result = result;
643 SCp->scsi_done(SCp);
644 } else {
645 printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
650 STATIC void
651 NCR_700_internal_bus_reset(struct Scsi_Host *host)
653 /* Bus reset */
654 NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
655 udelay(50);
656 NCR_700_writeb(0, host, SCNTL1_REG);
660 STATIC void
661 NCR_700_chip_setup(struct Scsi_Host *host)
663 struct NCR_700_Host_Parameters *hostdata =
664 (struct NCR_700_Host_Parameters *)host->hostdata[0];
665 __u32 dcntl_extra = 0;
666 __u8 min_period;
667 __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
669 if(hostdata->chip710) {
670 __u8 burst_disable = hostdata->burst_disable
671 ? BURST_DISABLE : 0;
672 dcntl_extra = COMPAT_700_MODE;
674 NCR_700_writeb(dcntl_extra, host, DCNTL_REG);
675 NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
676 host, DMODE_710_REG);
677 NCR_700_writeb(burst_disable | (hostdata->differential ?
678 DIFF : 0), host, CTEST7_REG);
679 NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
680 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
681 | AUTO_ATN, host, SCNTL0_REG);
682 } else {
683 NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
684 host, DMODE_700_REG);
685 NCR_700_writeb(hostdata->differential ?
686 DIFF : 0, host, CTEST7_REG);
687 if(hostdata->fast) {
688 /* this is for 700-66, does nothing on 700 */
689 NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
690 | GENERATE_RECEIVE_PARITY, host,
691 CTEST8_REG);
692 } else {
693 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
694 | PARITY | AUTO_ATN, host, SCNTL0_REG);
698 NCR_700_writeb(1 << host->this_id, host, SCID_REG);
699 NCR_700_writeb(0, host, SBCL_REG);
700 NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
702 NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
703 | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
705 NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
706 NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
707 if(hostdata->clock > 75) {
708 printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
709 /* do the best we can, but the async clock will be out
710 * of spec: sync divider 2, async divider 3 */
711 DEBUG(("53c700: sync 2 async 3\n"));
712 NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
713 NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
714 hostdata->sync_clock = hostdata->clock/2;
715 } else if(hostdata->clock > 50 && hostdata->clock <= 75) {
716 /* sync divider 1.5, async divider 3 */
717 DEBUG(("53c700: sync 1.5 async 3\n"));
718 NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
719 NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
720 hostdata->sync_clock = hostdata->clock*2;
721 hostdata->sync_clock /= 3;
723 } else if(hostdata->clock > 37 && hostdata->clock <= 50) {
724 /* sync divider 1, async divider 2 */
725 DEBUG(("53c700: sync 1 async 2\n"));
726 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
727 NCR_700_writeb(ASYNC_DIV_2_0 | dcntl_extra, host, DCNTL_REG);
728 hostdata->sync_clock = hostdata->clock;
729 } else if(hostdata->clock > 25 && hostdata->clock <=37) {
730 /* sync divider 1, async divider 1.5 */
731 DEBUG(("53c700: sync 1 async 1.5\n"));
732 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
733 NCR_700_writeb(ASYNC_DIV_1_5 | dcntl_extra, host, DCNTL_REG);
734 hostdata->sync_clock = hostdata->clock;
735 } else {
736 DEBUG(("53c700: sync 1 async 1\n"));
737 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
738 NCR_700_writeb(ASYNC_DIV_1_0 | dcntl_extra, host, DCNTL_REG);
739 /* sync divider 1, async divider 1 */
740 hostdata->sync_clock = hostdata->clock;
742 /* Calculate the actual minimum period that can be supported
743 * by our synchronous clock speed. See the 710 manual for
744 * exact details of this calculation which is based on a
745 * setting of the SXFER register */
746 min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
747 hostdata->min_period = NCR_700_MIN_PERIOD;
748 if(min_period > NCR_700_MIN_PERIOD)
749 hostdata->min_period = min_period;
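/* Worked example (illustrative, assuming min_xferp == 1): a 50MHz part
 * takes the sync divider 1 branch above, so sync_clock = 50 and
 * min_period = 1000*(4+1)/(4*50) = 25, i.e. 4*25 = 100ns (10MHz); any
 * SDTR period shorter than this is clamped by
 * NCR_700_offset_period_to_sxfer(). */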
752 STATIC void
753 NCR_700_chip_reset(struct Scsi_Host *host)
755 struct NCR_700_Host_Parameters *hostdata =
756 (struct NCR_700_Host_Parameters *)host->hostdata[0];
757 if(hostdata->chip710) {
758 NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
759 udelay(100);
761 NCR_700_writeb(0, host, ISTAT_REG);
762 } else {
763 NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
764 udelay(100);
766 NCR_700_writeb(0, host, DCNTL_REG);
769 mdelay(1000);
771 NCR_700_chip_setup(host);
774 /* The heart of the message processing engine is that the instruction
775 * immediately after the INT is the normal case (and so must be CLEAR
776 * ACK). If we want to do something else, we call that routine in
777 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
778 * ACK) so that the routine returns correctly to resume its activity
779 * */
780 STATIC __u32
781 process_extended_message(struct Scsi_Host *host,
782 struct NCR_700_Host_Parameters *hostdata,
783 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
785 __u32 resume_offset = dsp, temp = dsp + 8;
786 __u8 pun = 0xff, lun = 0xff;
788 if(SCp != NULL) {
789 pun = SCp->device->id;
790 lun = SCp->device->lun;
793 switch(hostdata->msgin[2]) {
794 case A_SDTR_MSG:
795 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
796 struct scsi_target *starget = SCp->device->sdev_target;
797 __u8 period = hostdata->msgin[3];
798 __u8 offset = hostdata->msgin[4];
800 if(offset == 0 || period == 0) {
801 offset = 0;
802 period = 0;
805 spi_offset(starget) = offset;
806 spi_period(starget) = period;
808 if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
809 spi_display_xfer_agreement(starget);
810 NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
813 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
814 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
816 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
817 host, SXFER_REG);
819 } else {
820 /* SDTR message out of the blue, reject it */
821 shost_printk(KERN_WARNING, host,
822 "Unexpected SDTR msg\n");
823 hostdata->msgout[0] = A_REJECT_MSG;
824 dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
825 script_patch_16(hostdata->script, MessageCount, 1);
826 /* SendMsgOut returns, so set up the return
827 * address */
828 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
830 break;
832 case A_WDTR_MSG:
833 printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
834 host->host_no, pun, lun);
835 hostdata->msgout[0] = A_REJECT_MSG;
836 dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
837 script_patch_16(hostdata->script, MessageCount, 1);
838 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
840 break;
842 default:
843 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
844 host->host_no, pun, lun,
845 NCR_700_phase[(dsps & 0xf00) >> 8]);
846 spi_print_msg(hostdata->msgin);
847 printk("\n");
848 /* just reject it */
849 hostdata->msgout[0] = A_REJECT_MSG;
850 dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
851 script_patch_16(hostdata->script, MessageCount, 1);
852 /* SendMsgOut returns, so set up the return
853 * address */
854 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
856 NCR_700_writel(temp, host, TEMP_REG);
857 return resume_offset;
860 STATIC __u32
861 process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata,
862 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
864 /* work out where to return to */
865 __u32 temp = dsp + 8, resume_offset = dsp;
866 __u8 pun = 0xff, lun = 0xff;
868 if(SCp != NULL) {
869 pun = SCp->device->id;
870 lun = SCp->device->lun;
873 #ifdef NCR_700_DEBUG
874 printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
875 NCR_700_phase[(dsps & 0xf00) >> 8]);
876 spi_print_msg(hostdata->msgin);
877 printk("\n");
878 #endif
880 switch(hostdata->msgin[0]) {
882 case A_EXTENDED_MSG:
883 resume_offset = process_extended_message(host, hostdata, SCp,
884 dsp, dsps);
885 break;
887 case A_REJECT_MSG:
888 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
889 /* Rejected our sync negotiation attempt */
890 spi_period(SCp->device->sdev_target) =
891 spi_offset(SCp->device->sdev_target) = 0;
892 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
893 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
894 } else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
895 /* rejected our first simple tag message */
896 scmd_printk(KERN_WARNING, SCp,
897 "Rejected first tag queue attempt, turning off tag queueing\n");
898 /* we're done negotiating */
899 NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
900 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
901 SCp->device->tagged_supported = 0;
902 scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
903 } else {
904 shost_printk(KERN_WARNING, host,
905 "(%d:%d) Unexpected REJECT Message %s\n",
906 pun, lun,
907 NCR_700_phase[(dsps & 0xf00) >> 8]);
908 /* however, just ignore it */
910 break;
912 case A_PARITY_ERROR_MSG:
913 printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
914 pun, lun);
915 NCR_700_internal_bus_reset(host);
916 break;
917 case A_SIMPLE_TAG_MSG:
918 printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
919 pun, lun, hostdata->msgin[1],
920 NCR_700_phase[(dsps & 0xf00) >> 8]);
921 /* just ignore it */
922 break;
923 default:
924 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
925 host->host_no, pun, lun,
926 NCR_700_phase[(dsps & 0xf00) >> 8]);
928 spi_print_msg(hostdata->msgin);
929 printk("\n");
930 /* just reject it */
931 hostdata->msgout[0] = A_REJECT_MSG;
932 dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
933 script_patch_16(hostdata->script, MessageCount, 1);
934 /* SendMsgOut returns, so set up the return
935 * address */
936 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
938 break;
940 NCR_700_writel(temp, host, TEMP_REG);
941 /* set us up to receive another message */
942 dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
943 return resume_offset;
946 STATIC __u32
947 process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
948 struct Scsi_Host *host,
949 struct NCR_700_Host_Parameters *hostdata)
951 __u32 resume_offset = 0;
952 __u8 pun = 0xff, lun=0xff;
954 if(SCp != NULL) {
955 pun = SCp->device->id;
956 lun = SCp->device->lun;
959 if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
960 DEBUG((" COMMAND COMPLETE, status=%02x\n",
961 hostdata->status[0]));
962 /* OK, if TCQ still under negotiation, we now know it works */
963 if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
964 NCR_700_set_tag_neg_state(SCp->device,
965 NCR_700_FINISHED_TAG_NEGOTIATION);
967 /* check for contingent allegiance conditions */
968 if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
969 status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
970 struct NCR_700_command_slot *slot =
971 (struct NCR_700_command_slot *)SCp->host_scribble;
972 if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
973 /* OOPS: bad device, returning another
974 * contingent allegiance condition */
975 scmd_printk(KERN_ERR, SCp,
976 "broken device is looping in contingent allegiance: ignoring\n");
977 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
978 } else {
979 char *cmnd =
980 NCR_700_get_sense_cmnd(SCp->device);
981 #ifdef NCR_DEBUG
982 scsi_print_command(SCp);
983 printk(" cmd %p has status %d, requesting sense\n",
984 SCp, hostdata->status[0]);
985 #endif
986 /* we can destroy the command here
987 * because the contingent allegiance
988 * condition will cause a retry which
989 * will re-copy the command from the
990 * saved data_cmnd. We also unmap any
991 * data associated with the command
992 * here */
993 NCR_700_unmap(hostdata, SCp, slot);
994 dma_unmap_single(hostdata->dev, slot->pCmd,
995 sizeof(SCp->cmnd),
996 DMA_TO_DEVICE);
998 cmnd[0] = REQUEST_SENSE;
999 cmnd[1] = (SCp->device->lun & 0x7) << 5;
1000 cmnd[2] = 0;
1001 cmnd[3] = 0;
1002 cmnd[4] = sizeof(SCp->sense_buffer);
1003 cmnd[5] = 0;
1004 /* Here's a quiet hack: the
1005 * REQUEST_SENSE command is six bytes,
1006 * so store a flag indicating that
1007 * this was an internal sense request
1008 * and the original status at the end
1009 * of the command */
1010 cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1011 cmnd[7] = hostdata->status[0];
1012 cmnd[8] = SCp->cmd_len;
1013 SCp->cmd_len = 6; /* command length for
1014 * REQUEST_SENSE */
1015 slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1016 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1017 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
1018 slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1019 slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1020 slot->SG[1].pAddr = 0;
1021 slot->resume_offset = hostdata->pScript;
1022 dma_cache_sync(slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1023 dma_cache_sync(SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1025 /* queue the command for reissue */
1026 slot->state = NCR_700_SLOT_QUEUED;
1027 slot->flags = NCR_700_FLAG_AUTOSENSE;
1028 hostdata->state = NCR_700_HOST_FREE;
1029 hostdata->cmd = NULL;
1031 } else {
1032 // Currently rely on the mid layer evaluation
1033 // of the tag queuing capability
1035 //if(status_byte(hostdata->status[0]) == GOOD &&
1036 // SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1037 // /* Piggy back the tag queueing support
1038 // * on this command */
1039 // dma_sync_single_for_cpu(hostdata->dev,
1040 // slot->dma_handle,
1041 // SCp->request_bufflen,
1042 // DMA_FROM_DEVICE);
1043 // if(((char *)SCp->request_buffer)[7] & 0x02) {
1044 // scmd_printk(KERN_INFO, SCp,
1045 // "Enabling Tag Command Queuing\n");
1046 // hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1047 // NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1048 // } else {
1049 // NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1050 // hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1051 // }
1053 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1055 } else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1056 __u8 i = (dsps & 0xf00) >> 8;
1058 scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1059 NCR_700_phase[i],
1060 sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1061 scmd_printk(KERN_ERR, SCp, " len = %d, cmd =",
1062 SCp->cmd_len);
1063 scsi_print_command(SCp);
1065 NCR_700_internal_bus_reset(host);
1066 } else if((dsps & 0xfffff000) == A_FATAL) {
1067 int i = (dsps & 0xfff);
1069 printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1070 host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1071 if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1072 printk(KERN_ERR " msg begins %02x %02x\n",
1073 hostdata->msgin[0], hostdata->msgin[1]);
1075 NCR_700_internal_bus_reset(host);
1076 } else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1077 #ifdef NCR_700_DEBUG
1078 __u8 i = (dsps & 0xf00) >> 8;
1080 printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1081 host->host_no, pun, lun,
1082 i, NCR_700_phase[i]);
1083 #endif
1084 save_for_reselection(hostdata, SCp, dsp);
1086 } else if(dsps == A_RESELECTION_IDENTIFIED) {
1087 __u8 lun;
1088 struct NCR_700_command_slot *slot;
1089 __u8 reselection_id = hostdata->reselection_id;
1090 struct scsi_device *SDp;
1092 lun = hostdata->msgin[0] & 0x1f;
1094 hostdata->reselection_id = 0xff;
1095 DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1096 host->host_no, reselection_id, lun));
1097 /* clear the reselection indicator */
1098 SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1099 if(unlikely(SDp == NULL)) {
1100 printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1101 host->host_no, reselection_id, lun);
1102 BUG();
1104 if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1105 struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
1106 if(unlikely(SCp == NULL)) {
1107 printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1108 host->host_no, reselection_id, lun, hostdata->msgin[2]);
1109 BUG();
1112 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1113 DDEBUG(KERN_DEBUG, SDp,
1114 "reselection is tag %d, slot %p(%d)\n",
1115 hostdata->msgin[2], slot, slot->tag);
1116 } else {
1117 struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
1118 if(unlikely(SCp == NULL)) {
1119 sdev_printk(KERN_ERR, SDp,
1120 "no saved request for untagged cmd\n");
1121 BUG();
1123 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1126 if(slot == NULL) {
1127 printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1128 host->host_no, reselection_id, lun,
1129 hostdata->msgin[0], hostdata->msgin[1],
1130 hostdata->msgin[2]);
1131 } else {
1132 if(hostdata->state != NCR_700_HOST_BUSY)
1133 printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1134 host->host_no);
1135 resume_offset = slot->resume_offset;
1136 hostdata->cmd = slot->cmnd;
1138 /* re-patch for this command */
1139 script_patch_32_abs(hostdata->script, CommandAddress,
1140 slot->pCmd);
1141 script_patch_16(hostdata->script,
1142 CommandCount, slot->cmnd->cmd_len);
1143 script_patch_32_abs(hostdata->script, SGScriptStartAddress,
1144 to32bit(&slot->pSG[0].ins));
1146 /* Note: setting SXFER only works if we're
1147 * still in the MESSAGE phase, so it is vital
1148 * that ACK is still asserted when we process
1149 * the reselection message. The resume offset
1150 * should therefore always clear ACK */
1151 NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1152 host, SXFER_REG);
1153 dma_cache_sync(hostdata->msgin,
1154 MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1155 dma_cache_sync(hostdata->msgout,
1156 MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1157 /* I'm just being paranoid here, the command should
1158 * already have been flushed from the cache */
1159 dma_cache_sync(slot->cmnd->cmnd,
1160 slot->cmnd->cmd_len, DMA_TO_DEVICE);
1165 } else if(dsps == A_RESELECTED_DURING_SELECTION) {
1167 /* This section is full of debugging code because I've
1168 * never managed to reach it. I think what happens is
1169 * that, because the 700 runs with selection
1170 * interrupts enabled the whole time, we take a
1171 * selection interrupt before we manage to get to the
1172 * reselected script interrupt */
1174 __u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1175 struct NCR_700_command_slot *slot;
1177 /* Take out our own ID */
1178 reselection_id &= ~(1<<host->this_id);
1180 /* I've never seen this happen, so keep this as a printk rather
1181 * than a debug */
1182 printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1183 host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1186 /* FIXME: DEBUGGING CODE */
1187 __u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1188 int i;
1190 for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1191 if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1192 && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1193 break;
1195 printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1196 SCp = hostdata->slots[i].cmnd;
1199 if(SCp != NULL) {
1200 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1201 /* change slot from busy to queued to redo command */
1202 slot->state = NCR_700_SLOT_QUEUED;
1204 hostdata->cmd = NULL;
1206 if(reselection_id == 0) {
1207 if(hostdata->reselection_id == 0xff) {
1208 printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1209 return 0;
1210 } else {
1211 printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1212 host->host_no);
1213 reselection_id = hostdata->reselection_id;
1215 } else {
1217 /* convert to real ID */
1218 reselection_id = bitmap_to_number(reselection_id);
1220 hostdata->reselection_id = reselection_id;
1221 /* just in case we have a stale simple tag message, clear it */
1222 hostdata->msgin[1] = 0;
1223 dma_cache_sync(hostdata->msgin,
1224 MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1225 if(hostdata->tag_negotiated & (1<<reselection_id)) {
1226 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1227 } else {
1228 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1230 } else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1231 /* we've just disconnected from the bus, do nothing since
1232 * a return here will re-run the queued command slot
1233 * that may have been interrupted by the initial selection */
1234 DEBUG((" SELECTION COMPLETED\n"));
1235 } else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1236 resume_offset = process_message(host, hostdata, SCp,
1237 dsp, dsps);
1238 } else if((dsps & 0xfffff000) == 0) {
1239 __u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1240 printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1241 host->host_no, pun, lun, NCR_700_condition[i],
1242 NCR_700_phase[j], dsp - hostdata->pScript);
1243 if(SCp != NULL) {
1244 scsi_print_command(SCp);
1246 if(SCp->use_sg) {
1247 for(i = 0; i < SCp->use_sg + 1; i++) {
1248 printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, ((struct scatterlist *)SCp->request_buffer)[i].length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1252 NCR_700_internal_bus_reset(host);
1253 } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1254 printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1255 host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1256 resume_offset = dsp;
1257 } else {
1258 printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1259 host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1260 NCR_700_internal_bus_reset(host);
1262 return resume_offset;
1265 /* We run the 53c700 with selection interrupts always enabled. This
1266 * means that the chip may be selected as soon as the bus frees. On a
1267 * busy bus, this can be before the scripts engine finishes its
1268 * processing. Therefore, part of the selection processing has to be
1269 * to find out what the scripts engine is doing and complete the
1270 * function if necessary (i.e. process the pending disconnect or save
1271 * the interrupted initial selection) */
1272 STATIC inline __u32
1273 process_selection(struct Scsi_Host *host, __u32 dsp)
1275 __u8 id = 0; /* Squash compiler warning */
1276 int count = 0;
1277 __u32 resume_offset = 0;
1278 struct NCR_700_Host_Parameters *hostdata =
1279 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1280 struct scsi_cmnd *SCp = hostdata->cmd;
1281 __u8 sbcl;
1283 for(count = 0; count < 5; count++) {
1284 id = NCR_700_readb(host, hostdata->chip710 ?
1285 CTEST9_REG : SFBR_REG);
1287 /* Take out our own ID */
1288 id &= ~(1<<host->this_id);
1289 if(id != 0)
1290 break;
1291 udelay(5);
1293 sbcl = NCR_700_readb(host, SBCL_REG);
1294 if((sbcl & SBCL_IO) == 0) {
1295 /* mark as having been selected rather than reselected */
1296 id = 0xff;
1297 } else {
1298 /* convert to real ID */
1299 hostdata->reselection_id = id = bitmap_to_number(id);
1300 DEBUG(("scsi%d: Reselected by %d\n",
1301 host->host_no, id));
1303 if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1304 struct NCR_700_command_slot *slot =
1305 (struct NCR_700_command_slot *)SCp->host_scribble;
1306 DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1308 switch(dsp - hostdata->pScript) {
1309 case Ent_Disconnect1:
1310 case Ent_Disconnect2:
1311 save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1312 break;
1313 case Ent_Disconnect3:
1314 case Ent_Disconnect4:
1315 save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1316 break;
1317 case Ent_Disconnect5:
1318 case Ent_Disconnect6:
1319 save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1320 break;
1321 case Ent_Disconnect7:
1322 case Ent_Disconnect8:
1323 save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1324 break;
1325 case Ent_Finish1:
1326 case Ent_Finish2:
1327 process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1328 break;
1330 default:
1331 slot->state = NCR_700_SLOT_QUEUED;
1332 break;
1335 hostdata->state = NCR_700_HOST_BUSY;
1336 hostdata->cmd = NULL;
1337 /* clear any stale simple tag message */
1338 hostdata->msgin[1] = 0;
1339 dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
1340 DMA_BIDIRECTIONAL);
1342 if(id == 0xff) {
1343 /* Selected as target, Ignore */
1344 resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1345 } else if(hostdata->tag_negotiated & (1<<id)) {
1346 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1347 } else {
1348 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1350 return resume_offset;
1353 static inline void
1354 NCR_700_clear_fifo(struct Scsi_Host *host) {
1355 const struct NCR_700_Host_Parameters *hostdata
1356 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1357 if(hostdata->chip710) {
1358 NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1359 } else {
1360 NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1364 static inline void
1365 NCR_700_flush_fifo(struct Scsi_Host *host) {
1366 const struct NCR_700_Host_Parameters *hostdata
1367 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1368 if(hostdata->chip710) {
1369 NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1370 udelay(10);
1371 NCR_700_writeb(0, host, CTEST8_REG);
1372 } else {
1373 NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1374 udelay(10);
1375 NCR_700_writeb(0, host, DFIFO_REG);
1380 /* The queue lock with interrupts disabled must be held on entry to
1381 * this function */
1382 STATIC int
1383 NCR_700_start_command(struct scsi_cmnd *SCp)
1385 struct NCR_700_command_slot *slot =
1386 (struct NCR_700_command_slot *)SCp->host_scribble;
1387 struct NCR_700_Host_Parameters *hostdata =
1388 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1389 __u16 count = 1; /* for IDENTIFY message */
1391 if(hostdata->state != NCR_700_HOST_FREE) {
1392 /* keep this inside the lock to close the race window where
1393 * the running command finishes on another CPU before we have
1394 * changed the state to queued on this one */
1395 slot->state = NCR_700_SLOT_QUEUED;
1397 DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1398 SCp->device->host->host_no, slot->cmnd, slot));
1399 return 0;
1401 hostdata->state = NCR_700_HOST_BUSY;
1402 hostdata->cmd = SCp;
1403 slot->state = NCR_700_SLOT_BUSY;
1404 /* keep interrupts disabled until we have the command correctly
1405 * set up so we cannot take a selection interrupt */
1407 hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1408 slot->flags != NCR_700_FLAG_AUTOSENSE),
1409 SCp->device->lun);
1410 /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1411 * if the negotiated transfer parameters still hold, so
1412 * always renegotiate them */
1413 if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1414 slot->flags == NCR_700_FLAG_AUTOSENSE) {
1415 NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1418 /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1419 * If a contingent allegiance condition exists, the device
1420 * will refuse all tags, so send the request sense as untagged
1421 * */
1422 if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1423 && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1424 slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1425 count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
1428 if(hostdata->fast &&
1429 NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1430 count += spi_populate_sync_msg(&hostdata->msgout[count],
1431 spi_period(SCp->device->sdev_target),
1432 spi_offset(SCp->device->sdev_target));
1433 NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1436 script_patch_16(hostdata->script, MessageCount, count);
1439 script_patch_ID(hostdata->script,
1440 Device_ID, 1<<scmd_id(SCp));
1442 script_patch_32_abs(hostdata->script, CommandAddress,
1443 slot->pCmd);
1444 script_patch_16(hostdata->script, CommandCount, SCp->cmd_len);
1445 /* finally plumb the beginning of the SG list into the script
1446 * */
1447 script_patch_32_abs(hostdata->script, SGScriptStartAddress,
1448 to32bit(&slot->pSG[0].ins));
1449 NCR_700_clear_fifo(SCp->device->host);
1451 if(slot->resume_offset == 0)
1452 slot->resume_offset = hostdata->pScript;
1453 /* now perform all the writebacks and invalidates */
1454 dma_cache_sync(hostdata->msgout, count, DMA_TO_DEVICE);
1455 dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
1456 DMA_FROM_DEVICE);
1457 dma_cache_sync(SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1458 dma_cache_sync(hostdata->status, 1, DMA_FROM_DEVICE);
1460 /* set the synchronous period/offset */
1461 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1462 SCp->device->host, SXFER_REG);
1463 NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1464 NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1466 return 1;
1469 irqreturn_t
1470 NCR_700_intr(int irq, void *dev_id)
1472 struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1473 struct NCR_700_Host_Parameters *hostdata =
1474 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1475 __u8 istat;
1476 __u32 resume_offset = 0;
1477 __u8 pun = 0xff, lun = 0xff;
1478 unsigned long flags;
1479 int handled = 0;
1481 /* Use the host lock to serialise access to the 53c700
1482 * hardware. Note: In future, we may need to take the queue
1483 * lock to enter the done routines. When that happens, we
1484 * need to ensure that for this driver, the host lock and the
1485 * queue lock point to the same thing. */
1486 spin_lock_irqsave(host->host_lock, flags);
1487 if((istat = NCR_700_readb(host, ISTAT_REG))
1488 & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1489 __u32 dsps;
1490 __u8 sstat0 = 0, dstat = 0;
1491 __u32 dsp;
1492 struct scsi_cmnd *SCp = hostdata->cmd;
1493 enum NCR_700_Host_State state;
1495 handled = 1;
1496 state = hostdata->state;
1497 SCp = hostdata->cmd;
1499 if(istat & SCSI_INT_PENDING) {
1500 udelay(10);
1502 sstat0 = NCR_700_readb(host, SSTAT0_REG);
1505 if(istat & DMA_INT_PENDING) {
1506 udelay(10);
1508 dstat = NCR_700_readb(host, DSTAT_REG);
1511 dsps = NCR_700_readl(host, DSPS_REG);
1512 dsp = NCR_700_readl(host, DSP_REG);
1514 DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1515 host->host_no, istat, sstat0, dstat,
1516 (dsp - (__u32)(hostdata->pScript))/4,
1517 dsp, dsps));
1519 if(SCp != NULL) {
1520 pun = SCp->device->id;
1521 lun = SCp->device->lun;
1524 if(sstat0 & SCSI_RESET_DETECTED) {
1525 struct scsi_device *SDp;
1526 int i;
1528 hostdata->state = NCR_700_HOST_BUSY;
1530 printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1531 host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1533 scsi_report_bus_reset(host, 0);
1535 /* clear all the negotiated parameters */
1536 __shost_for_each_device(SDp, host)
1537 NCR_700_clear_flag(SDp, ~0);
1539 /* clear all the slots and their pending commands */
1540 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1541 struct scsi_cmnd *SCp;
1542 struct NCR_700_command_slot *slot =
1543 &hostdata->slots[i];
1545 if(slot->state == NCR_700_SLOT_FREE)
1546 continue;
1548 SCp = slot->cmnd;
1549 printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1550 slot, SCp);
1551 free_slot(slot, hostdata);
1552 SCp->host_scribble = NULL;
1553 NCR_700_set_depth(SCp->device, 0);
1554 /* NOTE: deadlock potential here: we
1555 * rely on mid-layer guarantees that
1556 * scsi_done won't try to issue the
1557 * command again otherwise we'll
1558 * deadlock on the
1559 * hostdata->state_lock */
1560 SCp->result = DID_RESET << 16;
1561 SCp->scsi_done(SCp);
1563 mdelay(25);
1564 NCR_700_chip_setup(host);
1566 hostdata->state = NCR_700_HOST_FREE;
1567 hostdata->cmd = NULL;
1568 /* signal back if this was an eh induced reset */
1569 if(hostdata->eh_complete != NULL)
1570 complete(hostdata->eh_complete);
1571 goto out_unlock;
1572 } else if(sstat0 & SELECTION_TIMEOUT) {
1573 DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1574 host->host_no, pun, lun));
1575 NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1576 } else if(sstat0 & PHASE_MISMATCH) {
1577 struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1578 (struct NCR_700_command_slot *)SCp->host_scribble;
1580 if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1581 /* It wants to reply to some part of
1582 * our message */
1583 #ifdef NCR_700_DEBUG
1584 __u32 temp = NCR_700_readl(host, TEMP_REG);
1585 int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1586 printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1587 #endif
1588 resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1589 } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1590 dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1591 int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1592 int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1593 int residual = NCR_700_data_residual(host);
1594 int i;
1595 #ifdef NCR_700_DEBUG
1596 __u32 naddr = NCR_700_readl(host, DNAD_REG);
1598 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1599 host->host_no, pun, lun,
1600 SGcount, data_transfer);
1601 scsi_print_command(SCp);
1602 if(residual) {
1603 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1604 host->host_no, pun, lun,
1605 SGcount, data_transfer, residual);
1607 #endif
1608 data_transfer += residual;
1610 if(data_transfer != 0) {
1611 int count;
1612 __u32 pAddr;
1614 SGcount--;
1616 count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1617 DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1618 slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1619 slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1620 pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1621 pAddr += (count - data_transfer);
1622 #ifdef NCR_700_DEBUG
1623 if(pAddr != naddr) {
1624 printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1626 #endif
1627 slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1629 /* set the executed moves to nops */
1630 for(i=0; i<SGcount; i++) {
1631 slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1632 slot->SG[i].pAddr = 0;
1634 dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1635 /* and pretend we disconnected after
1636 * the command phase */
1637 resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1638 /* make sure all the data is flushed */
1639 NCR_700_flush_fifo(host);
1640 } else {
1641 __u8 sbcl = NCR_700_readb(host, SBCL_REG);
1642 printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1643 host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1644 NCR_700_internal_bus_reset(host);
1647 } else if(sstat0 & SCSI_GROSS_ERROR) {
1648 printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1649 host->host_no, pun, lun);
1650 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1651 } else if(sstat0 & PARITY_ERROR) {
1652 printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1653 host->host_no, pun, lun);
1654 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1655 } else if(dstat & SCRIPT_INT_RECEIVED) {
1656 DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1657 host->host_no, pun, lun));
1658 resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1659 } else if(dstat & (ILGL_INST_DETECTED)) {
1660 printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1661 " Please email James.Bottomley@HansenPartnership.com with the details\n",
1662 host->host_no, pun, lun,
1663 dsp, dsp - hostdata->pScript);
1664 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1665 } else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1666 printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1667 host->host_no, pun, lun, dstat);
1668 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1669 }
1672 /* NOTE: selection interrupt processing MUST occur
1673 * after script interrupt processing to correctly cope
1674 * with the case where we process a disconnect and
1675 * then get reselected before we process the
1676 * disconnection */
1677 if(sstat0 & SELECTED) {
1678 /* FIXME: It currently takes at least FOUR
1679 * interrupts to complete a command that
1680 * disconnects: one for the disconnect, one
1681 * for the reselection, one to get the
1682 * reselection data and one to complete the
1683 * command. If we guess the reselected
1684 * command here and prepare it, we only need
1685 * to get a reselection data interrupt if we
1686 * guessed wrongly. Since the interrupt
1687 * overhead is much greater than the command
1688 * setup, this would be an efficient
1689 * optimisation particularly as we probably
1690 * only have one outstanding command on a
1691 * target most of the time */
1693 resume_offset = process_selection(host, dsp);
1699 if(resume_offset) {
1700 if(hostdata->state != NCR_700_HOST_BUSY) {
1701 printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1702 host->host_no, resume_offset, resume_offset - hostdata->pScript);
1703 hostdata->state = NCR_700_HOST_BUSY;
1704 }
1706 DEBUG(("Attempting to resume at %x\n", resume_offset));
1707 NCR_700_clear_fifo(host);
1708 NCR_700_writel(resume_offset, host, DSP_REG);
1709 }
1710 /* There is probably a technical no-no about this: if we're a
1711 * shared interrupt and we got this interrupt because the
1712 * other device needs servicing, not us, we're still going to
1713 * check our queued commands here---of course, there shouldn't
1714 * be any outstanding.... */
1715 if(hostdata->state == NCR_700_HOST_FREE) {
1716 int i;
1718 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1719 /* fairness: always resume the queue scan from the
1720 * position where we last left off */
1721 int j = (i + hostdata->saved_slot_position)
1722 % NCR_700_COMMAND_SLOTS_PER_HOST;
1724 if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1725 continue;
1726 if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1727 DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1728 host->host_no, &hostdata->slots[j],
1729 hostdata->slots[j].cmnd));
1730 hostdata->saved_slot_position = j + 1;
1731 }
1733 break;
1734 }
1735 }
1736 out_unlock:
1737 spin_unlock_irqrestore(host->host_lock, flags);
1738 return IRQ_RETVAL(handled);
1741 STATIC int
1742 NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1744 struct NCR_700_Host_Parameters *hostdata =
1745 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1746 __u32 move_ins;
1747 enum dma_data_direction direction;
1748 struct NCR_700_command_slot *slot;
1750 if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1751 /* We're over our allocation; this should never happen
1752 * since we report the max allocation to the mid layer */
1753 printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1754 return 1;
1756 /* check for untagged commands. We cannot accept an untagged
1757 * command while other commands are outstanding. Commands could be untagged because:
1758 *
1759 * - The tag negotiated bitmap is clear
1760 * - The blk layer sent an untagged command
1761 */
1762 if(NCR_700_get_depth(SCp->device) != 0
1763 && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1764 || !blk_rq_tagged(SCp->request))) {
1765 CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1766 NCR_700_get_depth(SCp->device));
1767 return SCSI_MLQUEUE_DEVICE_BUSY;
1769 if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1770 CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1771 NCR_700_get_depth(SCp->device));
1772 return SCSI_MLQUEUE_DEVICE_BUSY;
1774 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
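/* one more command outstanding on this device; we track the depth
* ourselves for the untagged/queue-full gating above */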
1776 /* begin the command here */
1777 /* no need to check for NULL; the command_slot_count test above
1778 * ensures a slot is free */
1779 slot = find_empty_slot(hostdata);
1781 slot->cmnd = SCp;
1783 SCp->scsi_done = done;
1784 SCp->host_scribble = (unsigned char *)slot;
1785 SCp->SCp.ptr = NULL;
1786 SCp->SCp.buffer = NULL;
1788 #ifdef NCR_700_DEBUG
1789 printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1790 scsi_print_command(SCp);
1791 #endif
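/* a tagged request for a target that hasn't negotiated TCQ yet
* starts tag negotiation with this command */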
1792 if(blk_rq_tagged(SCp->request)
1793 && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1794 && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1795 scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1796 hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1797 NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1800 /* here we may have to process an untagged command. The gate
1801 * above ensures that this will be the only one outstanding,
1802 * so clear the tag negotiated bit.
1804 * FIXME: This will royally screw up on multiple LUN devices
1805 */
1806 if(!blk_rq_tagged(SCp->request)
1807 && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1808 scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1809 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1812 if((hostdata->tag_negotiated &(1<<scmd_id(SCp)))
1813 && scsi_get_tag_type(SCp->device)) {
1814 slot->tag = SCp->request->tag;
1815 CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1816 slot->tag, slot);
1817 } else {
1818 slot->tag = SCSI_NO_TAG;
1819 /* must populate current_cmnd for scsi_find_tag to work */
1820 SCp->device->current_cmnd = SCp;
1822 /* sanity check: some of the commands generated by the mid-layer
1823 * have an eccentric idea of their sc_data_direction */
1824 if(!SCp->use_sg && !SCp->request_bufflen
1825 && SCp->sc_data_direction != DMA_NONE) {
1826 #ifdef NCR_700_DEBUG
1827 printk("53c700: Command");
1828 scsi_print_command(SCp);
1829 printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1830 #endif
1831 SCp->sc_data_direction = DMA_NONE;
1834 switch (SCp->cmnd[0]) {
1835 case REQUEST_SENSE:
1836 /* clear the internal sense magic */
1837 SCp->cmnd[6] = 0;
1838 /* fall through */
1839 default:
1840 /* OK, get it from the command */
1841 switch(SCp->sc_data_direction) {
1842 case DMA_BIDIRECTIONAL:
1843 default:
1844 printk(KERN_ERR "53c700: Unknown command for data direction ");
1845 scsi_print_command(SCp);
1847 move_ins = 0;
1848 break;
1849 case DMA_NONE:
1850 move_ins = 0;
1851 break;
1852 case DMA_FROM_DEVICE:
1853 move_ins = SCRIPT_MOVE_DATA_IN;
1854 break;
1855 case DMA_TO_DEVICE:
1856 move_ins = SCRIPT_MOVE_DATA_OUT;
1857 break;
1861 /* now build the scatter gather list */
1862 direction = SCp->sc_data_direction;
1863 if(move_ins != 0) {
1864 int i;
1865 int sg_count;
1866 dma_addr_t vPtr = 0;
1867 __u32 count = 0;
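/* map either the scatterlist or the single linear buffer so the
* chip can DMA directly to or from it */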
1869 if(SCp->use_sg) {
1870 sg_count = dma_map_sg(hostdata->dev,
1871 SCp->request_buffer, SCp->use_sg,
1872 direction);
1873 } else {
1874 vPtr = dma_map_single(hostdata->dev,
1875 SCp->request_buffer,
1876 SCp->request_bufflen,
1877 direction);
1878 count = SCp->request_bufflen;
1879 slot->dma_handle = vPtr;
1880 sg_count = 1;
1884 for(i = 0; i < sg_count; i++) {
1886 if(SCp->use_sg) {
1887 struct scatterlist *sg = SCp->request_buffer;
1889 vPtr = sg_dma_address(&sg[i]);
1890 count = sg_dma_len(&sg[i]);
1893 slot->SG[i].ins = bS_to_host(move_ins | count);
1894 DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1895 i, count, slot->SG[i].ins, (unsigned long)vPtr));
1896 slot->SG[i].pAddr = bS_to_host(vPtr);
1897 }
1898 slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1899 slot->SG[i].pAddr = 0;
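/* the trailing SCRIPT_RETURN hands control back to the script
* proper once every element has been moved; sync the list so the
* chip sees the updated instructions */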
1900 dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1901 DEBUG((" SETTING %08lx to %x\n",
1902 (&slot->pSG[i].ins),
1903 slot->SG[i].ins));
1905 slot->resume_offset = 0;
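/* map the CDB so the script engine can fetch it during the
* command phase */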
1906 slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1907 sizeof(SCp->cmnd), DMA_TO_DEVICE);
1908 NCR_700_start_command(SCp);
1909 return 0;
1912 STATIC int
1913 NCR_700_abort(struct scsi_cmnd * SCp)
1915 struct NCR_700_command_slot *slot;
1917 scmd_printk(KERN_INFO, SCp,
1918 "New error handler wants to abort command\n\t");
1919 scsi_print_command(SCp);
1921 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1923 if(slot == NULL)
1924 /* no outstanding command to abort */
1925 return SUCCESS;
1926 if(SCp->cmnd[0] == TEST_UNIT_READY) {
1927 /* FIXME: This is because of a problem in the new
1928 * error handler. When it is in error recovery, it
1929 * will send a TUR to a device it thinks may still be
1930 * showing a problem. If the TUR gets no response,
1931 * it will abort it and mark the device offline.
1932 * Unfortunately, it does no other error recovery, so
1933 * this would leave us with an outstanding command
1934 * occupying a slot. Rather than allow this to
1935 * happen, we issue a bus reset to force all
1936 * outstanding commands to terminate here. */
1937 NCR_700_internal_bus_reset(SCp->device->host);
1938 /* still drop through and return failed */
1940 return FAILED;
1944 STATIC int
1945 NCR_700_bus_reset(struct scsi_cmnd * SCp)
1947 DECLARE_COMPLETION_ONSTACK(complete);
1948 struct NCR_700_Host_Parameters *hostdata =
1949 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1951 scmd_printk(KERN_INFO, SCp,
1952 "New error handler wants BUS reset, cmd %p\n\t", SCp);
1953 scsi_print_command(SCp);
1955 /* In theory, eh_complete should always be NULL because the
1956 * eh is single-threaded, but just in case we're handling a
1957 * reset via sg or something */
1958 spin_lock_irq(SCp->device->host->host_lock);
1959 while (hostdata->eh_complete != NULL) {
1960 spin_unlock_irq(SCp->device->host->host_lock);
1961 msleep_interruptible(100);
1962 spin_lock_irq(SCp->device->host->host_lock);
1965 hostdata->eh_complete = &complete;
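/* the interrupt handler is expected to complete this when it sees
* the resulting bus reset */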
1966 NCR_700_internal_bus_reset(SCp->device->host);
1968 spin_unlock_irq(SCp->device->host->host_lock);
1969 wait_for_completion(&complete);
1970 spin_lock_irq(SCp->device->host->host_lock);
1972 hostdata->eh_complete = NULL;
1973 /* Revalidate the transport parameters of the failing device */
1974 if(hostdata->fast)
1975 spi_schedule_dv_device(SCp->device);
1977 spin_unlock_irq(SCp->device->host->host_lock);
1978 return SUCCESS;
1981 STATIC int
1982 NCR_700_host_reset(struct scsi_cmnd * SCp)
1984 scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
1985 scsi_print_command(SCp);
1987 spin_lock_irq(SCp->device->host->host_lock);
1989 NCR_700_internal_bus_reset(SCp->device->host);
1990 NCR_700_chip_reset(SCp->device->host);
1992 spin_unlock_irq(SCp->device->host->host_lock);
1994 return SUCCESS;
1997 STATIC void
1998 NCR_700_set_period(struct scsi_target *STp, int period)
2000 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2001 struct NCR_700_Host_Parameters *hostdata =
2002 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2004 if(!hostdata->fast)
2005 return;
2007 if(period < hostdata->min_period)
2008 period = hostdata->min_period;
2010 spi_period(STp) = period;
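/* forget any previously negotiated agreement so the parameters can
* be renegotiated at the new period */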
2011 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2012 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2013 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2016 STATIC void
2017 NCR_700_set_offset(struct scsi_target *STp, int offset)
2019 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2020 struct NCR_700_Host_Parameters *hostdata =
2021 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2022 int max_offset = hostdata->chip710
2023 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2025 if(!hostdata->fast)
2026 return;
2028 if(offset > max_offset)
2029 offset = max_offset;
2031 /* if we're currently async, make sure the period is reasonable */
2032 if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2033 spi_period(STp) > 0xff))
2034 spi_period(STp) = hostdata->min_period;
2036 spi_offset(STp) = offset;
2037 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2038 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2039 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2042 STATIC int
2043 NCR_700_slave_alloc(struct scsi_device *SDp)
2045 SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2046 GFP_KERNEL);
2048 if (!SDp->hostdata)
2049 return -ENOMEM;
2051 return 0;
2054 STATIC int
2055 NCR_700_slave_configure(struct scsi_device *SDp)
2057 struct NCR_700_Host_Parameters *hostdata =
2058 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2060 /* to do here: allocate memory; build a queue_full list */
2061 if(SDp->tagged_supported) {
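/* the actual tag negotiation is deferred to queuecommand: the
* START state makes it begin negotiation with the next tagged
* command sent to this device */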
2062 scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
2063 scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
2064 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2065 } else {
2066 /* initialise to default depth */
2067 scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
2069 if(hostdata->fast) {
2070 /* Find the correct offset and period via domain validation */
2071 if (!spi_initial_dv(SDp->sdev_target))
2072 spi_dv_device(SDp);
2073 } else {
2074 spi_offset(SDp->sdev_target) = 0;
2075 spi_period(SDp->sdev_target) = 0;
2077 return 0;
2080 STATIC void
2081 NCR_700_slave_destroy(struct scsi_device *SDp)
2083 kfree(SDp->hostdata);
2084 SDp->hostdata = NULL;
2087 static int
2088 NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2090 if (depth > NCR_700_MAX_TAGS)
2091 depth = NCR_700_MAX_TAGS;
2093 scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
2094 return depth;
2097 static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2099 int change_tag = ((tag_type ==0 && scsi_get_tag_type(SDp) != 0)
2100 || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
2101 struct NCR_700_Host_Parameters *hostdata =
2102 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2104 scsi_set_tag_type(SDp, tag_type);
2106 /* We have a global (per target) flag to track whether TCQ is
2107 * enabled, so we'll be turning it off for the entire target here.
2108 * Our tag algorithm will fail if we mix tagged and untagged commands,
2109 * so quiesce the device before doing this. */
2110 if (change_tag)
2111 scsi_target_quiesce(SDp->sdev_target);
2113 if (!tag_type) {
2114 /* shift back to the default unqueued number of commands
2115 * (the user can still raise this) */
2116 scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
2117 hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
2118 } else {
2119 /* We cleared the negotiation flag above, so this
2120 * will force the driver to renegotiate */
2121 scsi_activate_tcq(SDp, SDp->queue_depth);
2122 if (change_tag)
2123 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2125 if (change_tag)
2126 scsi_target_resume(SDp->sdev_target);
2128 return tag_type;
2131 static ssize_t
2132 NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2134 struct scsi_device *SDp = to_scsi_device(dev);
2136 return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2139 static struct device_attribute NCR_700_active_tags_attr = {
2140 .attr = {
2141 .name = "active_tags",
2142 .mode = S_IRUGO,
2144 .show = NCR_700_show_active_tags,
2147 STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2148 &NCR_700_active_tags_attr,
2149 NULL,
2152 EXPORT_SYMBOL(NCR_700_detect);
2153 EXPORT_SYMBOL(NCR_700_release);
2154 EXPORT_SYMBOL(NCR_700_intr);
2156 static struct spi_function_template NCR_700_transport_functions = {
2157 .set_period = NCR_700_set_period,
2158 .show_period = 1,
2159 .set_offset = NCR_700_set_offset,
2160 .show_offset = 1,
2163 static int __init NCR_700_init(void)
2165 NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2166 if(!NCR_700_transport_template)
2167 return -ENODEV;
2168 return 0;
2171 static void __exit NCR_700_exit(void)
2173 spi_release_transport(NCR_700_transport_template);
2176 module_init(NCR_700_init);
2177 module_exit(NCR_700_exit);