/* -*- mode: c; c-basic-offset: 8 -*- */

/* NCR (or Symbios) 53c700 and 53c700-66 Driver
 *
 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
**-----------------------------------------------------------------------------
**
**  This program is free software; you can redistribute it and/or modify
**  it under the terms of the GNU General Public License as published by
**  the Free Software Foundation; either version 2 of the License, or
**  (at your option) any later version.
**
**  This program is distributed in the hope that it will be useful,
**  but WITHOUT ANY WARRANTY; without even the implied warranty of
**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
**  GNU General Public License for more details.
**
**  You should have received a copy of the GNU General Public License
**  along with this program; if not, write to the Free Software
**  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
 */

/* Notes:
 *
 * This driver is designed exclusively for these chips (virtually the
 * earliest of the scripts engine chips).  They need their own drivers
 * because they are missing so many of the scripts and snazzy register
 * features of their elder brothers (the 710, 720 and 770).
 *
 * The 700 is the lowliest of the line; it can only do async SCSI.
 * The 700-66 can at least do synchronous SCSI up to 10MHz.
 *
 * The 700 chip has no host bus interface logic of its own.  However,
 * it is usually mapped to a location with well defined register
 * offsets.  Therefore, if you can determine the base address and the
 * irq your board incorporating this chip uses, you can probably use
 * this driver to run it (although you'll probably have to write a
 * minimal wrapper for the purpose---see the NCR_D700 driver for
 * details about how to do this).
 *
 * TODO List:
 *
 * 1. Better statistics in the proc fs
 *
 * 2. Implement message queue (queues SCSI messages like commands) and make
 *    the abort and device reset functions use them.
 * */

/* CHANGELOG
 *
 * Version 2.8
 *
 * Fixed bad bug affecting tag starvation processing (previously the
 * driver would hang the system if too many tags starved).  Also fixed
 * bad bug having to do with 10 byte command processing and REQUEST
 * SENSE (the command would loop forever getting a transfer length
 * mismatch in the CMD phase).
 *
 * Version 2.7
 *
 * Fixed scripts problem which caused certain devices (notably CDRWs)
 * to hang on initial INQUIRY.  Updated NCR_700_readl/writel to use
 * __raw_readl/writel for parisc compatibility (Thomas
 * Bogendoerfer).  Added missing SCp->request_bufflen initialisation
 * for sense requests (Ryan Bradetich).
 *
 * Version 2.6
 *
 * Following test of the 64 bit parisc kernel by Richard Hirst,
 * several problems have now been corrected.  Also adds support for
 * consistent memory allocation.
 *
 * Version 2.5
 *
 * More compatibility changes for the 710 (now actually works).  Enhanced
 * support for odd clock speeds which constrain SDTR negotiations.
 * Correct cacheline separation for scsi messages and status for
 * incoherent architectures.  Use of the pci mapping functions on
 * buffers to begin support for 64 bit drivers.
 *
 * Version 2.4
 *
 * Added support for the 53c710 chip (in 53c700 emulation mode only---no
 * special 53c710 instructions or registers are used).
 *
 * Version 2.3
 *
 * More endianness/cache coherency changes.
 *
 * Better bad device handling (handles devices lying about tag
 * queueing support and devices which fail to provide sense data on
 * contingent allegiance conditions).
 *
 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
 * debugging this driver on the parisc architecture and suggesting
 * many improvements and bug fixes.
 *
 * Thanks also go to Linuxcare Inc. for providing several PARISC
 * machines for me to debug the driver on.
 *
 * Version 2.2
 *
 * Made the driver mem or io mapped; added endian invariance; added
 * dma cache flushing operations for architectures which need it;
 * added support for more varied clocking speeds.
 *
 * Version 2.1
 *
 * Initial modularisation from the D700.  See NCR_D700.c for the rest of
 * the changelog.
 * */
#define NCR_700_VERSION "2.8"

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/byteorder.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_spi.h>

#include "53c700.h"
/* NOTE: For 64 bit drivers there are points in the code where we use
 * a non dereferenceable pointer to point to a structure in dma-able
 * memory (which is 32 bits) so that we can use all of the structure
 * operations but take the address at the end.  This macro allows us
 * to truncate the 64 bit pointer down to 32 bits without the compiler
 * complaining */
#define to32bit(x) ((__u32)((unsigned long)(x)))

#ifdef NCR_700_DEBUG
#define STATIC
#else
#define STATIC static
#endif

MODULE_AUTHOR("James Bottomley");
MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
MODULE_LICENSE("GPL");

/* This is the script */
#include "53c700_d.h"
169 STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
170 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
171 STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
172 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
173 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
174 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
175 STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
176 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
177 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
178 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth, int reason);
179 static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
181 STATIC struct device_attribute *NCR_700_dev_attrs[];
183 STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
185 static char *NCR_700_phase[] = {
187 "after selection",
188 "before command phase",
189 "after command phase",
190 "after status phase",
191 "after data in phase",
192 "after data out phase",
193 "during data phase",
196 static char *NCR_700_condition[] = {
198 "NOT MSG_OUT",
199 "UNEXPECTED PHASE",
200 "NOT MSG_IN",
201 "UNEXPECTED MSG",
202 "MSG_IN",
203 "SDTR_MSG RECEIVED",
204 "REJECT_MSG RECEIVED",
205 "DISCONNECT_MSG RECEIVED",
206 "MSG_OUT",
207 "DATA_IN",
211 static char *NCR_700_fatal_messages[] = {
212 "unexpected message after reselection",
213 "still MSG_OUT after message injection",
214 "not MSG_IN after selection",
215 "Illegal message length received",
218 static char *NCR_700_SBCL_bits[] = {
219 "IO ",
220 "CD ",
221 "MSG ",
222 "ATN ",
223 "SEL ",
224 "BSY ",
225 "ACK ",
226 "REQ ",
229 static char *NCR_700_SBCL_to_phase[] = {
230 "DATA_OUT",
231 "DATA_IN",
232 "CMD_OUT",
233 "STATE",
234 "ILLEGAL PHASE",
235 "ILLEGAL PHASE",
236 "MSG OUT",
237 "MSG IN",
240 /* This translates the SDTR message offset and period to a value
241 * which can be loaded into the SXFER_REG.
243 * NOTE: According to SCSI-2, the true transfer period (in ns) is
244 * actually four times this period value */
static inline __u8
NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
                               __u8 offset, __u8 period)
{
        int XFERP;

        __u8 min_xferp = (hostdata->chip710
                          ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
        __u8 max_offset = (hostdata->chip710
                           ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);

        if(offset == 0)
                return 0;

        if(period < hostdata->min_period) {
                printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, NCR_700_MIN_PERIOD*4);
                period = hostdata->min_period;
        }
        XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
        if(offset > max_offset) {
                printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
                       offset, max_offset);
                offset = max_offset;
        }
        if(XFERP < min_xferp) {
                XFERP = min_xferp;
        }
        return (offset & 0x0f) | (XFERP & 0x07)<<4;
}
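/* Worked example (illustrative only): with a 50MHz synchronous clock, an
 * SDTR period of 25 (i.e. a true 100ns period) and an offset of 8,
 *      XFERP = (25*4 * 50)/1000 - 4 = 1
 * so, provided that is not below the chip's minimum XFERP, the returned
 * SXFER value is (8 & 0x0f) | ((1 & 0x07) << 4) = 0x18. */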
275 static inline __u8
276 NCR_700_get_SXFER(struct scsi_device *SDp)
278 struct NCR_700_Host_Parameters *hostdata =
279 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
281 return NCR_700_offset_period_to_sxfer(hostdata,
282 spi_offset(SDp->sdev_target),
283 spi_period(SDp->sdev_target));
286 struct Scsi_Host *
287 NCR_700_detect(struct scsi_host_template *tpnt,
288 struct NCR_700_Host_Parameters *hostdata, struct device *dev)
290 dma_addr_t pScript, pSlots;
291 __u8 *memory;
292 __u32 *script;
293 struct Scsi_Host *host;
294 static int banner = 0;
295 int j;
297 if(tpnt->sdev_attrs == NULL)
298 tpnt->sdev_attrs = NCR_700_dev_attrs;
300 memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
301 &pScript, GFP_KERNEL);
        if(memory == NULL) {
                printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
                return NULL;
        }
307 script = (__u32 *)memory;
308 hostdata->msgin = memory + MSGIN_OFFSET;
309 hostdata->msgout = memory + MSGOUT_OFFSET;
310 hostdata->status = memory + STATUS_OFFSET;
311 hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
312 hostdata->dev = dev;
314 pSlots = pScript + SLOTS_OFFSET;
316 /* Fill in the missing routines from the host template */
317 tpnt->queuecommand = NCR_700_queuecommand;
318 tpnt->eh_abort_handler = NCR_700_abort;
319 tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
320 tpnt->eh_host_reset_handler = NCR_700_host_reset;
321 tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
322 tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
323 tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
324 tpnt->use_clustering = ENABLE_CLUSTERING;
325 tpnt->slave_configure = NCR_700_slave_configure;
326 tpnt->slave_destroy = NCR_700_slave_destroy;
327 tpnt->slave_alloc = NCR_700_slave_alloc;
328 tpnt->change_queue_depth = NCR_700_change_queue_depth;
329 tpnt->change_queue_type = NCR_700_change_queue_type;
331 if(tpnt->name == NULL)
332 tpnt->name = "53c700";
333 if(tpnt->proc_name == NULL)
334 tpnt->proc_name = "53c700";
336 host = scsi_host_alloc(tpnt, 4);
337 if (!host)
338 return NULL;
339 memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
340 * NCR_700_COMMAND_SLOTS_PER_HOST);
341 for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
342 dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
343 - (unsigned long)&hostdata->slots[0].SG[0]);
344 hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
345 if(j == 0)
346 hostdata->free_list = &hostdata->slots[j];
347 else
348 hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
349 hostdata->slots[j].state = NCR_700_SLOT_FREE;
352 for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
353 script[j] = bS_to_host(SCRIPT[j]);
355 /* adjust all labels to be bus physical */
356 for (j = 0; j < PATCHES; j++)
357 script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
358 /* now patch up fixed addresses. */
359 script_patch_32(hostdata->dev, script, MessageLocation,
360 pScript + MSGOUT_OFFSET);
361 script_patch_32(hostdata->dev, script, StatusAddress,
362 pScript + STATUS_OFFSET);
363 script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
364 pScript + MSGIN_OFFSET);
366 hostdata->script = script;
367 hostdata->pScript = pScript;
368 dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
369 hostdata->state = NCR_700_HOST_FREE;
370 hostdata->cmd = NULL;
371 host->max_id = 8;
372 host->max_lun = NCR_700_MAX_LUNS;
373 BUG_ON(NCR_700_transport_template == NULL);
374 host->transportt = NCR_700_transport_template;
375 host->unique_id = (unsigned long)hostdata->base;
376 hostdata->eh_complete = NULL;
377 host->hostdata[0] = (unsigned long)hostdata;
378 /* kick the chip */
379 NCR_700_writeb(0xff, host, CTEST9_REG);
380 if (hostdata->chip710)
381 hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
382 else
383 hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
384 hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
385 if (banner == 0) {
386 printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
387 banner = 1;
389 printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
390 hostdata->chip710 ? "53c710" :
391 (hostdata->fast ? "53c700-66" : "53c700"),
392 hostdata->rev, hostdata->differential ?
393 "(Differential)" : "");
394 /* reset the chip */
395 NCR_700_chip_reset(host);
397 if (scsi_add_host(host, dev)) {
398 dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
399 scsi_host_put(host);
400 return NULL;
403 spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
404 SPI_SIGNAL_SE;
406 return host;
410 NCR_700_release(struct Scsi_Host *host)
412 struct NCR_700_Host_Parameters *hostdata =
413 (struct NCR_700_Host_Parameters *)host->hostdata[0];
415 dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
416 hostdata->script, hostdata->pScript);
417 return 1;
420 static inline __u8
421 NCR_700_identify(int can_disconnect, __u8 lun)
423 return IDENTIFY_BASE |
424 ((can_disconnect) ? 0x40 : 0) |
425 (lun & NCR_700_LUN_MASK);
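/* Illustrative example: for a device that is allowed to disconnect, on LUN 2,
 * this builds 0x80 | 0x40 | 0x02 = 0xc2, the standard SCSI IDENTIFY message
 * (assuming the usual IDENTIFY_BASE of 0x80 and a LUN mask that passes 2). */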
429 * Function : static int data_residual (Scsi_Host *host)
431 * Purpose : return residual data count of what's in the chip. If you
432 * really want to know what this function is doing, it's almost a
433 * direct transcription of the algorithm described in the 53c710
434 * guide, except that the DBC and DFIFO registers are only 6 bits
435 * wide on a 53c700.
437 * Inputs : host - SCSI host */
438 static inline int
439 NCR_700_data_residual (struct Scsi_Host *host) {
440 struct NCR_700_Host_Parameters *hostdata =
441 (struct NCR_700_Host_Parameters *)host->hostdata[0];
442 int count, synchronous = 0;
443 unsigned int ddir;
445 if(hostdata->chip710) {
446 count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
447 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
448 } else {
449 count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
450 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
453 if(hostdata->fast)
454 synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
456 /* get the data direction */
457 ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
459 if (ddir) {
460 /* Receive */
461 if (synchronous)
462 count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
463 else
464 if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
465 ++count;
466 } else {
467 /* Send */
468 __u8 sstat = NCR_700_readb(host, SSTAT1_REG);
469 if (sstat & SODL_REG_FULL)
470 ++count;
471 if (synchronous && (sstat & SODR_REG_FULL))
472 ++count;
474 #ifdef NCR_700_DEBUG
475 if(count)
476 printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
477 #endif
478 return count;
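/* Illustrative reading of the above (not taken from the data manual): on a
 * 710, if DFIFO reads 0x45 and the low 7 bits of DBC read 0x42, then
 * (0x45 - 0x42) & 0x7f = 3 bytes are still in the DMA FIFO; a byte latched
 * in SIDL (receive) or SODL/SODR (send) is then added on top of that. */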
481 /* print out the SCSI wires and corresponding phase from the SBCL register
482 * in the chip */
483 static inline char *
484 sbcl_to_string(__u8 sbcl)
486 int i;
487 static char ret[256];
489 ret[0]='\0';
490 for(i=0; i<8; i++) {
491 if((1<<i) & sbcl)
492 strcat(ret, NCR_700_SBCL_bits[i]);
494 strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
495 return ret;
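/* Example (illustrative): sbcl_to_string(0x87) gives "IO CD MSG REQ MSG IN",
 * i.e. REQ asserted with MSG, C/D and I/O, which is the MSG IN phase.  Note
 * that the static buffer makes this helper non-reentrant; it is only used
 * from debug and error printks. */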
498 static inline __u8
499 bitmap_to_number(__u8 bitmap)
501 __u8 i;
503 for(i=0; i<8 && !(bitmap &(1<<i)); i++)
505 return i;
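/* e.g. bitmap_to_number(0x20) returns 5; if several bits are set the lowest
 * set bit wins, and an empty bitmap falls through to 8. */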
508 /* Pull a slot off the free list */
509 STATIC struct NCR_700_command_slot *
510 find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
512 struct NCR_700_command_slot *slot = hostdata->free_list;
514 if(slot == NULL) {
515 /* sanity check */
516 if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
517 printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
518 return NULL;
521 if(slot->state != NCR_700_SLOT_FREE)
522 /* should panic! */
523 printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
526 hostdata->free_list = slot->ITL_forw;
527 slot->ITL_forw = NULL;
        /* NOTE: set the state to busy here, not queued, since this
         * indicates the slot is in use and cannot be run by the IRQ
         * finish routine.  If we cannot queue the command when it
         * is properly built, we then change to NCR_700_SLOT_QUEUED */
534 slot->state = NCR_700_SLOT_BUSY;
535 slot->flags = 0;
536 hostdata->command_slot_count++;
538 return slot;
541 STATIC void
542 free_slot(struct NCR_700_command_slot *slot,
543 struct NCR_700_Host_Parameters *hostdata)
545 if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
546 printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
548 if(slot->state == NCR_700_SLOT_FREE) {
549 printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
552 slot->resume_offset = 0;
553 slot->cmnd = NULL;
554 slot->state = NCR_700_SLOT_FREE;
555 slot->ITL_forw = hostdata->free_list;
556 hostdata->free_list = slot;
557 hostdata->command_slot_count--;
561 /* This routine really does very little. The command is indexed on
562 the ITL and (if tagged) the ITLQ lists in _queuecommand */
563 STATIC void
564 save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
565 struct scsi_cmnd *SCp, __u32 dsp)
        /* It's just possible that this gets executed twice */
568 if(SCp != NULL) {
569 struct NCR_700_command_slot *slot =
570 (struct NCR_700_command_slot *)SCp->host_scribble;
572 slot->resume_offset = dsp;
574 hostdata->state = NCR_700_HOST_FREE;
575 hostdata->cmd = NULL;
578 STATIC inline void
579 NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
580 struct NCR_700_command_slot *slot)
582 if(SCp->sc_data_direction != DMA_NONE &&
583 SCp->sc_data_direction != DMA_BIDIRECTIONAL)
584 scsi_dma_unmap(SCp);
587 STATIC inline void
588 NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
589 struct scsi_cmnd *SCp, int result)
591 hostdata->state = NCR_700_HOST_FREE;
592 hostdata->cmd = NULL;
594 if(SCp != NULL) {
595 struct NCR_700_command_slot *slot =
596 (struct NCR_700_command_slot *)SCp->host_scribble;
598 dma_unmap_single(hostdata->dev, slot->pCmd,
599 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
600 if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
601 char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
603 dma_unmap_single(hostdata->dev, slot->dma_handle,
604 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
605 /* restore the old result if the request sense was
606 * successful */
607 if (result == 0)
608 result = cmnd[7];
609 /* restore the original length */
610 SCp->cmd_len = cmnd[8];
611 } else
612 NCR_700_unmap(hostdata, SCp, slot);
614 free_slot(slot, hostdata);
615 #ifdef NCR_700_DEBUG
616 if(NCR_700_get_depth(SCp->device) == 0 ||
617 NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
618 printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
619 NCR_700_get_depth(SCp->device));
620 #endif /* NCR_700_DEBUG */
621 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
623 SCp->host_scribble = NULL;
624 SCp->result = result;
625 SCp->scsi_done(SCp);
626 } else {
627 printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
632 STATIC void
633 NCR_700_internal_bus_reset(struct Scsi_Host *host)
635 /* Bus reset */
636 NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
637 udelay(50);
638 NCR_700_writeb(0, host, SCNTL1_REG);
642 STATIC void
643 NCR_700_chip_setup(struct Scsi_Host *host)
645 struct NCR_700_Host_Parameters *hostdata =
646 (struct NCR_700_Host_Parameters *)host->hostdata[0];
647 __u8 min_period;
648 __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
650 if(hostdata->chip710) {
651 __u8 burst_disable = 0;
652 __u8 burst_length = 0;
654 switch (hostdata->burst_length) {
655 case 1:
656 burst_length = BURST_LENGTH_1;
657 break;
658 case 2:
659 burst_length = BURST_LENGTH_2;
660 break;
661 case 4:
662 burst_length = BURST_LENGTH_4;
663 break;
664 case 8:
665 burst_length = BURST_LENGTH_8;
666 break;
667 default:
668 burst_disable = BURST_DISABLE;
669 break;
671 hostdata->dcntl_extra |= COMPAT_700_MODE;
673 NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
674 NCR_700_writeb(burst_length | hostdata->dmode_extra,
675 host, DMODE_710_REG);
676 NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
677 (hostdata->differential ? DIFF : 0),
678 host, CTEST7_REG);
679 NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
680 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
681 | AUTO_ATN, host, SCNTL0_REG);
682 } else {
683 NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
684 host, DMODE_700_REG);
685 NCR_700_writeb(hostdata->differential ?
686 DIFF : 0, host, CTEST7_REG);
687 if(hostdata->fast) {
688 /* this is for 700-66, does nothing on 700 */
689 NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
690 | GENERATE_RECEIVE_PARITY, host,
691 CTEST8_REG);
692 } else {
693 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
694 | PARITY | AUTO_ATN, host, SCNTL0_REG);
698 NCR_700_writeb(1 << host->this_id, host, SCID_REG);
699 NCR_700_writeb(0, host, SBCL_REG);
700 NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
702 NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
703 | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
705 NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
706 NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
        if(hostdata->clock > 75) {
                printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
709 /* do the best we can, but the async clock will be out
710 * of spec: sync divider 2, async divider 3 */
711 DEBUG(("53c700: sync 2 async 3\n"));
712 NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
713 NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
714 hostdata->sync_clock = hostdata->clock/2;
715 } else if(hostdata->clock > 50 && hostdata->clock <= 75) {
716 /* sync divider 1.5, async divider 3 */
717 DEBUG(("53c700: sync 1.5 async 3\n"));
718 NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
719 NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
720 hostdata->sync_clock = hostdata->clock*2;
721 hostdata->sync_clock /= 3;
723 } else if(hostdata->clock > 37 && hostdata->clock <= 50) {
724 /* sync divider 1, async divider 2 */
725 DEBUG(("53c700: sync 1 async 2\n"));
726 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
727 NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
728 hostdata->sync_clock = hostdata->clock;
729 } else if(hostdata->clock > 25 && hostdata->clock <=37) {
730 /* sync divider 1, async divider 1.5 */
731 DEBUG(("53c700: sync 1 async 1.5\n"));
732 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
733 NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
734 hostdata->sync_clock = hostdata->clock;
735 } else {
736 DEBUG(("53c700: sync 1 async 1\n"));
737 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
738 NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
739 /* sync divider 1, async divider 1 */
740 hostdata->sync_clock = hostdata->clock;
742 /* Calculate the actual minimum period that can be supported
743 * by our synchronous clock speed. See the 710 manual for
744 * exact details of this calculation which is based on a
745 * setting of the SXFER register */
746 min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
747 hostdata->min_period = NCR_700_MIN_PERIOD;
748 if(min_period > NCR_700_MIN_PERIOD)
749 hostdata->min_period = min_period;
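        /* Worked example (illustrative, assuming a minimum XFERP of 1 for
         * the sake of the numbers): with a 50MHz sync clock,
         * min_period = 1000*(4+1)/(4*50) = 25, i.e. a 100ns transfer
         * period once the 4ns SDTR units are applied. */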
752 STATIC void
753 NCR_700_chip_reset(struct Scsi_Host *host)
755 struct NCR_700_Host_Parameters *hostdata =
756 (struct NCR_700_Host_Parameters *)host->hostdata[0];
757 if(hostdata->chip710) {
758 NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
759 udelay(100);
761 NCR_700_writeb(0, host, ISTAT_REG);
762 } else {
763 NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
764 udelay(100);
766 NCR_700_writeb(0, host, DCNTL_REG);
769 mdelay(1000);
771 NCR_700_chip_setup(host);
774 /* The heart of the message processing engine is that the instruction
775 * immediately after the INT is the normal case (and so must be CLEAR
776 * ACK). If we want to do something else, we call that routine in
777 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
778 * ACK) so that the routine returns correctly to resume its activity
779 * */
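/* A sketch of that convention (illustrative): the SCRIPTS instructions used
 * here are two 32-bit words each, so dsp+8 addresses the instruction after
 * the CLEAR ACK that follows the INT.  Loading that address into TEMP_REG
 * makes the called scripts routine RETURN past the CLEAR ACK instead of
 * executing it. */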
780 STATIC __u32
781 process_extended_message(struct Scsi_Host *host,
782 struct NCR_700_Host_Parameters *hostdata,
783 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
785 __u32 resume_offset = dsp, temp = dsp + 8;
786 __u8 pun = 0xff, lun = 0xff;
788 if(SCp != NULL) {
789 pun = SCp->device->id;
790 lun = SCp->device->lun;
793 switch(hostdata->msgin[2]) {
794 case A_SDTR_MSG:
795 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
796 struct scsi_target *starget = SCp->device->sdev_target;
797 __u8 period = hostdata->msgin[3];
798 __u8 offset = hostdata->msgin[4];
800 if(offset == 0 || period == 0) {
801 offset = 0;
802 period = 0;
805 spi_offset(starget) = offset;
806 spi_period(starget) = period;
808 if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
809 spi_display_xfer_agreement(starget);
810 NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
813 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
814 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
816 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
817 host, SXFER_REG);
819 } else {
820 /* SDTR message out of the blue, reject it */
821 shost_printk(KERN_WARNING, host,
822 "Unexpected SDTR msg\n");
823 hostdata->msgout[0] = A_REJECT_MSG;
824 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
825 script_patch_16(hostdata->dev, hostdata->script,
826 MessageCount, 1);
827 /* SendMsgOut returns, so set up the return
828 * address */
829 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
831 break;
833 case A_WDTR_MSG:
834 printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
835 host->host_no, pun, lun);
836 hostdata->msgout[0] = A_REJECT_MSG;
837 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
                script_patch_16(hostdata->dev, hostdata->script, MessageCount,
                                1);
                resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
842 break;
844 default:
845 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
846 host->host_no, pun, lun,
847 NCR_700_phase[(dsps & 0xf00) >> 8]);
848 spi_print_msg(hostdata->msgin);
849 printk("\n");
850 /* just reject it */
851 hostdata->msgout[0] = A_REJECT_MSG;
852 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
                script_patch_16(hostdata->dev, hostdata->script, MessageCount,
                                1);
                /* SendMsgOut returns, so set up the return
856 * address */
857 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
859 NCR_700_writel(temp, host, TEMP_REG);
860 return resume_offset;
863 STATIC __u32
864 process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata,
865 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
867 /* work out where to return to */
868 __u32 temp = dsp + 8, resume_offset = dsp;
869 __u8 pun = 0xff, lun = 0xff;
871 if(SCp != NULL) {
872 pun = SCp->device->id;
873 lun = SCp->device->lun;
876 #ifdef NCR_700_DEBUG
877 printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
878 NCR_700_phase[(dsps & 0xf00) >> 8]);
879 spi_print_msg(hostdata->msgin);
880 printk("\n");
881 #endif
883 switch(hostdata->msgin[0]) {
885 case A_EXTENDED_MSG:
886 resume_offset = process_extended_message(host, hostdata, SCp,
887 dsp, dsps);
888 break;
890 case A_REJECT_MSG:
891 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
892 /* Rejected our sync negotiation attempt */
893 spi_period(SCp->device->sdev_target) =
894 spi_offset(SCp->device->sdev_target) = 0;
895 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
896 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
897 } else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
898 /* rejected our first simple tag message */
899 scmd_printk(KERN_WARNING, SCp,
900 "Rejected first tag queue attempt, turning off tag queueing\n");
901 /* we're done negotiating */
902 NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
903 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
904 SCp->device->tagged_supported = 0;
905 scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
906 } else {
907 shost_printk(KERN_WARNING, host,
908 "(%d:%d) Unexpected REJECT Message %s\n",
909 pun, lun,
910 NCR_700_phase[(dsps & 0xf00) >> 8]);
911 /* however, just ignore it */
913 break;
915 case A_PARITY_ERROR_MSG:
916 printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
917 pun, lun);
918 NCR_700_internal_bus_reset(host);
919 break;
920 case A_SIMPLE_TAG_MSG:
921 printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
922 pun, lun, hostdata->msgin[1],
923 NCR_700_phase[(dsps & 0xf00) >> 8]);
924 /* just ignore it */
925 break;
926 default:
927 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
928 host->host_no, pun, lun,
929 NCR_700_phase[(dsps & 0xf00) >> 8]);
931 spi_print_msg(hostdata->msgin);
932 printk("\n");
933 /* just reject it */
934 hostdata->msgout[0] = A_REJECT_MSG;
935 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
                script_patch_16(hostdata->dev, hostdata->script, MessageCount,
                                1);
                /* SendMsgOut returns, so set up the return
939 * address */
940 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
942 break;
944 NCR_700_writel(temp, host, TEMP_REG);
945 /* set us up to receive another message */
946 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
947 return resume_offset;
950 STATIC __u32
951 process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
952 struct Scsi_Host *host,
953 struct NCR_700_Host_Parameters *hostdata)
955 __u32 resume_offset = 0;
956 __u8 pun = 0xff, lun=0xff;
958 if(SCp != NULL) {
959 pun = SCp->device->id;
960 lun = SCp->device->lun;
963 if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
964 DEBUG((" COMMAND COMPLETE, status=%02x\n",
965 hostdata->status[0]));
966 /* OK, if TCQ still under negotiation, we now know it works */
967 if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
968 NCR_700_set_tag_neg_state(SCp->device,
969 NCR_700_FINISHED_TAG_NEGOTIATION);
                /* check for contingent allegiance conditions */
972 if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
973 status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
974 struct NCR_700_command_slot *slot =
975 (struct NCR_700_command_slot *)SCp->host_scribble;
976 if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
977 /* OOPS: bad device, returning another
978 * contingent allegiance condition */
979 scmd_printk(KERN_ERR, SCp,
980 "broken device is looping in contingent allegiance: ignoring\n");
981 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
982 } else {
983 char *cmnd =
984 NCR_700_get_sense_cmnd(SCp->device);
985 #ifdef NCR_DEBUG
986 scsi_print_command(SCp);
987 printk(" cmd %p has status %d, requesting sense\n",
988 SCp, hostdata->status[0]);
989 #endif
990 /* we can destroy the command here
991 * because the contingent allegiance
992 * condition will cause a retry which
993 * will re-copy the command from the
994 * saved data_cmnd. We also unmap any
995 * data associated with the command
996 * here */
997 NCR_700_unmap(hostdata, SCp, slot);
998 dma_unmap_single(hostdata->dev, slot->pCmd,
999 MAX_COMMAND_SIZE,
1000 DMA_TO_DEVICE);
1002 cmnd[0] = REQUEST_SENSE;
1003 cmnd[1] = (lun & 0x7) << 5;
1004 cmnd[2] = 0;
1005 cmnd[3] = 0;
1006 cmnd[4] = SCSI_SENSE_BUFFERSIZE;
1007 cmnd[5] = 0;
1008 /* Here's a quiet hack: the
1009 * REQUEST_SENSE command is six bytes,
1010 * so store a flag indicating that
1011 * this was an internal sense request
1012 * and the original status at the end
1013 * of the command */
1014 cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1015 cmnd[7] = hostdata->status[0];
1016 cmnd[8] = SCp->cmd_len;
1017 SCp->cmd_len = 6; /* command length for
1018 * REQUEST_SENSE */
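                                /* Illustrative layout of the private sense
                                 * CDB buffer after the lines above:
                                 *   bytes 0-5: REQUEST SENSE CDB
                                 *   byte  6:   NCR_700_INTERNAL_SENSE_MAGIC
                                 *   byte  7:   original SCSI status
                                 *   byte  8:   original cmd_len
                                 * NCR_700_scsi_done() restores bytes 7 and 8
                                 * when this autosense command completes. */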
1019 slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1020 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1021 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
1022 slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1023 slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1024 slot->SG[1].pAddr = 0;
1025 slot->resume_offset = hostdata->pScript;
1026 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1027 dma_cache_sync(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1029 /* queue the command for reissue */
1030 slot->state = NCR_700_SLOT_QUEUED;
1031 slot->flags = NCR_700_FLAG_AUTOSENSE;
1032 hostdata->state = NCR_700_HOST_FREE;
1033 hostdata->cmd = NULL;
1035 } else {
1036 // Currently rely on the mid layer evaluation
1037 // of the tag queuing capability
1039 //if(status_byte(hostdata->status[0]) == GOOD &&
1040 // SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1041 // /* Piggy back the tag queueing support
1042 // * on this command */
1043 // dma_sync_single_for_cpu(hostdata->dev,
1044 // slot->dma_handle,
1045 // SCp->request_bufflen,
1046 // DMA_FROM_DEVICE);
1047 // if(((char *)SCp->request_buffer)[7] & 0x02) {
1048 // scmd_printk(KERN_INFO, SCp,
1049 // "Enabling Tag Command Queuing\n");
1050 // hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1051 // NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1052 // } else {
1053 // NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1054 // hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1055 // }
1057 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1059 } else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1060 __u8 i = (dsps & 0xf00) >> 8;
1062 scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1063 NCR_700_phase[i],
1064 sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1065 scmd_printk(KERN_ERR, SCp, " len = %d, cmd =",
1066 SCp->cmd_len);
1067 scsi_print_command(SCp);
1069 NCR_700_internal_bus_reset(host);
1070 } else if((dsps & 0xfffff000) == A_FATAL) {
1071 int i = (dsps & 0xfff);
1073 printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1074 host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1075 if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1076 printk(KERN_ERR " msg begins %02x %02x\n",
1077 hostdata->msgin[0], hostdata->msgin[1]);
1079 NCR_700_internal_bus_reset(host);
1080 } else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1081 #ifdef NCR_700_DEBUG
1082 __u8 i = (dsps & 0xf00) >> 8;
1084 printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1085 host->host_no, pun, lun,
1086 i, NCR_700_phase[i]);
1087 #endif
1088 save_for_reselection(hostdata, SCp, dsp);
1090 } else if(dsps == A_RESELECTION_IDENTIFIED) {
1091 __u8 lun;
1092 struct NCR_700_command_slot *slot;
1093 __u8 reselection_id = hostdata->reselection_id;
1094 struct scsi_device *SDp;
1096 lun = hostdata->msgin[0] & 0x1f;
1098 hostdata->reselection_id = 0xff;
1099 DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1100 host->host_no, reselection_id, lun));
1101 /* clear the reselection indicator */
1102 SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1103 if(unlikely(SDp == NULL)) {
1104 printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1105 host->host_no, reselection_id, lun);
1106 BUG();
1108 if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1109 struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
1110 if(unlikely(SCp == NULL)) {
1111 printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1112 host->host_no, reselection_id, lun, hostdata->msgin[2]);
1113 BUG();
1116 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1117 DDEBUG(KERN_DEBUG, SDp,
1118 "reselection is tag %d, slot %p(%d)\n",
1119 hostdata->msgin[2], slot, slot->tag);
1120 } else {
1121 struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
1122 if(unlikely(SCp == NULL)) {
1123 sdev_printk(KERN_ERR, SDp,
1124 "no saved request for untagged cmd\n");
1125 BUG();
1127 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1130 if(slot == NULL) {
1131 printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1132 host->host_no, reselection_id, lun,
1133 hostdata->msgin[0], hostdata->msgin[1],
1134 hostdata->msgin[2]);
1135 } else {
1136 if(hostdata->state != NCR_700_HOST_BUSY)
1137 printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1138 host->host_no);
1139 resume_offset = slot->resume_offset;
1140 hostdata->cmd = slot->cmnd;
1142 /* re-patch for this command */
1143 script_patch_32_abs(hostdata->dev, hostdata->script,
1144 CommandAddress, slot->pCmd);
1145 script_patch_16(hostdata->dev, hostdata->script,
1146 CommandCount, slot->cmnd->cmd_len);
1147 script_patch_32_abs(hostdata->dev, hostdata->script,
1148 SGScriptStartAddress,
1149 to32bit(&slot->pSG[0].ins));
1151 /* Note: setting SXFER only works if we're
1152 * still in the MESSAGE phase, so it is vital
1153 * that ACK is still asserted when we process
1154 * the reselection message. The resume offset
1155 * should therefore always clear ACK */
1156 NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1157 host, SXFER_REG);
1158 dma_cache_sync(hostdata->dev, hostdata->msgin,
1159 MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1160 dma_cache_sync(hostdata->dev, hostdata->msgout,
1161 MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1162 /* I'm just being paranoid here, the command should
1163 * already have been flushed from the cache */
1164 dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
1165 slot->cmnd->cmd_len, DMA_TO_DEVICE);
1170 } else if(dsps == A_RESELECTED_DURING_SELECTION) {
                /* This section is full of debugging code because I've
                 * never managed to reach it.  I think what happens is
                 * that, because the 700 runs with selection
                 * interrupts enabled the whole time, we take a
                 * selection interrupt before we manage to get to the
                 * reselected script interrupt */
1179 __u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1180 struct NCR_700_command_slot *slot;
1182 /* Take out our own ID */
1183 reselection_id &= ~(1<<host->this_id);
1185 /* I've never seen this happen, so keep this as a printk rather
1186 * than a debug */
1187 printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1188 host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1191 /* FIXME: DEBUGGING CODE */
1192 __u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1193 int i;
1195 for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1196 if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1197 && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1198 break;
1200 printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1201 SCp = hostdata->slots[i].cmnd;
1204 if(SCp != NULL) {
1205 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1206 /* change slot from busy to queued to redo command */
1207 slot->state = NCR_700_SLOT_QUEUED;
1209 hostdata->cmd = NULL;
1211 if(reselection_id == 0) {
1212 if(hostdata->reselection_id == 0xff) {
1213 printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1214 return 0;
1215 } else {
1216 printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1217 host->host_no);
1218 reselection_id = hostdata->reselection_id;
1220 } else {
1222 /* convert to real ID */
1223 reselection_id = bitmap_to_number(reselection_id);
1225 hostdata->reselection_id = reselection_id;
1226 /* just in case we have a stale simple tag message, clear it */
1227 hostdata->msgin[1] = 0;
1228 dma_cache_sync(hostdata->dev, hostdata->msgin,
1229 MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1230 if(hostdata->tag_negotiated & (1<<reselection_id)) {
1231 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1232 } else {
1233 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1235 } else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1236 /* we've just disconnected from the bus, do nothing since
1237 * a return here will re-run the queued command slot
1238 * that may have been interrupted by the initial selection */
1239 DEBUG((" SELECTION COMPLETED\n"));
1240 } else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1241 resume_offset = process_message(host, hostdata, SCp,
1242 dsp, dsps);
1243 } else if((dsps & 0xfffff000) == 0) {
1244 __u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1245 printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1246 host->host_no, pun, lun, NCR_700_condition[i],
1247 NCR_700_phase[j], dsp - hostdata->pScript);
1248 if(SCp != NULL) {
1249 struct scatterlist *sg;
1251 scsi_print_command(SCp);
1252 scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
1253 printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1256 NCR_700_internal_bus_reset(host);
1257 } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1258 printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1259 host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1260 resume_offset = dsp;
1261 } else {
1262 printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1263 host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1264 NCR_700_internal_bus_reset(host);
1266 return resume_offset;
1269 /* We run the 53c700 with selection interrupts always enabled. This
1270 * means that the chip may be selected as soon as the bus frees. On a
1271 * busy bus, this can be before the scripts engine finishes its
1272 * processing. Therefore, part of the selection processing has to be
1273 * to find out what the scripts engine is doing and complete the
1274 * function if necessary (i.e. process the pending disconnect or save
 * the interrupted initial selection) */
1276 STATIC inline __u32
1277 process_selection(struct Scsi_Host *host, __u32 dsp)
1279 __u8 id = 0; /* Squash compiler warning */
1280 int count = 0;
1281 __u32 resume_offset = 0;
1282 struct NCR_700_Host_Parameters *hostdata =
1283 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1284 struct scsi_cmnd *SCp = hostdata->cmd;
1285 __u8 sbcl;
1287 for(count = 0; count < 5; count++) {
1288 id = NCR_700_readb(host, hostdata->chip710 ?
1289 CTEST9_REG : SFBR_REG);
1291 /* Take out our own ID */
1292 id &= ~(1<<host->this_id);
1293 if(id != 0)
1294 break;
1295 udelay(5);
1297 sbcl = NCR_700_readb(host, SBCL_REG);
1298 if((sbcl & SBCL_IO) == 0) {
1299 /* mark as having been selected rather than reselected */
1300 id = 0xff;
1301 } else {
1302 /* convert to real ID */
1303 hostdata->reselection_id = id = bitmap_to_number(id);
1304 DEBUG(("scsi%d: Reselected by %d\n",
1305 host->host_no, id));
1307 if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1308 struct NCR_700_command_slot *slot =
1309 (struct NCR_700_command_slot *)SCp->host_scribble;
1310 DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1312 switch(dsp - hostdata->pScript) {
1313 case Ent_Disconnect1:
1314 case Ent_Disconnect2:
1315 save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1316 break;
1317 case Ent_Disconnect3:
1318 case Ent_Disconnect4:
1319 save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1320 break;
1321 case Ent_Disconnect5:
1322 case Ent_Disconnect6:
1323 save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1324 break;
1325 case Ent_Disconnect7:
1326 case Ent_Disconnect8:
1327 save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1328 break;
1329 case Ent_Finish1:
1330 case Ent_Finish2:
1331 process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1332 break;
1334 default:
1335 slot->state = NCR_700_SLOT_QUEUED;
1336 break;
1339 hostdata->state = NCR_700_HOST_BUSY;
1340 hostdata->cmd = NULL;
1341 /* clear any stale simple tag message */
1342 hostdata->msgin[1] = 0;
1343 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1344 DMA_BIDIRECTIONAL);
1346 if(id == 0xff) {
1347 /* Selected as target, Ignore */
1348 resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1349 } else if(hostdata->tag_negotiated & (1<<id)) {
1350 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1351 } else {
1352 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1354 return resume_offset;
1357 static inline void
1358 NCR_700_clear_fifo(struct Scsi_Host *host) {
1359 const struct NCR_700_Host_Parameters *hostdata
1360 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1361 if(hostdata->chip710) {
1362 NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1363 } else {
1364 NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1368 static inline void
1369 NCR_700_flush_fifo(struct Scsi_Host *host) {
1370 const struct NCR_700_Host_Parameters *hostdata
1371 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1372 if(hostdata->chip710) {
1373 NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1374 udelay(10);
1375 NCR_700_writeb(0, host, CTEST8_REG);
1376 } else {
1377 NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1378 udelay(10);
1379 NCR_700_writeb(0, host, DFIFO_REG);
1384 /* The queue lock with interrupts disabled must be held on entry to
1385 * this function */
1386 STATIC int
1387 NCR_700_start_command(struct scsi_cmnd *SCp)
1389 struct NCR_700_command_slot *slot =
1390 (struct NCR_700_command_slot *)SCp->host_scribble;
1391 struct NCR_700_Host_Parameters *hostdata =
1392 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1393 __u16 count = 1; /* for IDENTIFY message */
1394 u8 lun = SCp->device->lun;
1396 if(hostdata->state != NCR_700_HOST_FREE) {
1397 /* keep this inside the lock to close the race window where
1398 * the running command finishes on another CPU while we don't
1399 * change the state to queued on this one */
1400 slot->state = NCR_700_SLOT_QUEUED;
1402 DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1403 SCp->device->host->host_no, slot->cmnd, slot));
1404 return 0;
1406 hostdata->state = NCR_700_HOST_BUSY;
1407 hostdata->cmd = SCp;
1408 slot->state = NCR_700_SLOT_BUSY;
1409 /* keep interrupts disabled until we have the command correctly
1410 * set up so we cannot take a selection interrupt */
1412 hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1413 slot->flags != NCR_700_FLAG_AUTOSENSE),
1414 lun);
1415 /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1416 * if the negotiated transfer parameters still hold, so
1417 * always renegotiate them */
1418 if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1419 slot->flags == NCR_700_FLAG_AUTOSENSE) {
1420 NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1423 /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1424 * If a contingent allegiance condition exists, the device
1425 * will refuse all tags, so send the request sense as untagged
1426 * */
1427 if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1428 && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1429 slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1430 count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
1433 if(hostdata->fast &&
1434 NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1435 count += spi_populate_sync_msg(&hostdata->msgout[count],
1436 spi_period(SCp->device->sdev_target),
1437 spi_offset(SCp->device->sdev_target));
1438 NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1441 script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
1444 script_patch_ID(hostdata->dev, hostdata->script,
1445 Device_ID, 1<<scmd_id(SCp));
1447 script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
1448 slot->pCmd);
1449 script_patch_16(hostdata->dev, hostdata->script, CommandCount,
1450 SCp->cmd_len);
1451 /* finally plumb the beginning of the SG list into the script
1452 * */
1453 script_patch_32_abs(hostdata->dev, hostdata->script,
1454 SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
1455 NCR_700_clear_fifo(SCp->device->host);
1457 if(slot->resume_offset == 0)
1458 slot->resume_offset = hostdata->pScript;
1459 /* now perform all the writebacks and invalidates */
1460 dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
1461 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1462 DMA_FROM_DEVICE);
1463 dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1464 dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
1466 /* set the synchronous period/offset */
1467 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1468 SCp->device->host, SXFER_REG);
1469 NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1470 NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1472 return 1;
1475 irqreturn_t
1476 NCR_700_intr(int irq, void *dev_id)
1478 struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1479 struct NCR_700_Host_Parameters *hostdata =
1480 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1481 __u8 istat;
1482 __u32 resume_offset = 0;
1483 __u8 pun = 0xff, lun = 0xff;
1484 unsigned long flags;
1485 int handled = 0;
1487 /* Use the host lock to serialise access to the 53c700
1488 * hardware. Note: In future, we may need to take the queue
1489 * lock to enter the done routines. When that happens, we
1490 * need to ensure that for this driver, the host lock and the
1491 * queue lock point to the same thing. */
1492 spin_lock_irqsave(host->host_lock, flags);
1493 if((istat = NCR_700_readb(host, ISTAT_REG))
1494 & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1495 __u32 dsps;
1496 __u8 sstat0 = 0, dstat = 0;
1497 __u32 dsp;
1498 struct scsi_cmnd *SCp = hostdata->cmd;
1499 enum NCR_700_Host_State state;
1501 handled = 1;
1502 state = hostdata->state;
1503 SCp = hostdata->cmd;
1505 if(istat & SCSI_INT_PENDING) {
1506 udelay(10);
1508 sstat0 = NCR_700_readb(host, SSTAT0_REG);
1511 if(istat & DMA_INT_PENDING) {
1512 udelay(10);
1514 dstat = NCR_700_readb(host, DSTAT_REG);
1517 dsps = NCR_700_readl(host, DSPS_REG);
1518 dsp = NCR_700_readl(host, DSP_REG);
1520 DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1521 host->host_no, istat, sstat0, dstat,
1522 (dsp - (__u32)(hostdata->pScript))/4,
1523 dsp, dsps));
1525 if(SCp != NULL) {
1526 pun = SCp->device->id;
1527 lun = SCp->device->lun;
1530 if(sstat0 & SCSI_RESET_DETECTED) {
1531 struct scsi_device *SDp;
1532 int i;
1534 hostdata->state = NCR_700_HOST_BUSY;
1536 printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1537 host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1539 scsi_report_bus_reset(host, 0);
1541 /* clear all the negotiated parameters */
1542 __shost_for_each_device(SDp, host)
1543 NCR_700_clear_flag(SDp, ~0);
1545 /* clear all the slots and their pending commands */
1546 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1547 struct scsi_cmnd *SCp;
1548 struct NCR_700_command_slot *slot =
1549 &hostdata->slots[i];
1551 if(slot->state == NCR_700_SLOT_FREE)
1552 continue;
1554 SCp = slot->cmnd;
1555 printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1556 slot, SCp);
1557 free_slot(slot, hostdata);
1558 SCp->host_scribble = NULL;
1559 NCR_700_set_depth(SCp->device, 0);
1560 /* NOTE: deadlock potential here: we
1561 * rely on mid-layer guarantees that
1562 * scsi_done won't try to issue the
1563 * command again otherwise we'll
1564 * deadlock on the
1565 * hostdata->state_lock */
1566 SCp->result = DID_RESET << 16;
1567 SCp->scsi_done(SCp);
1569 mdelay(25);
1570 NCR_700_chip_setup(host);
1572 hostdata->state = NCR_700_HOST_FREE;
1573 hostdata->cmd = NULL;
1574 /* signal back if this was an eh induced reset */
1575 if(hostdata->eh_complete != NULL)
1576 complete(hostdata->eh_complete);
1577 goto out_unlock;
1578 } else if(sstat0 & SELECTION_TIMEOUT) {
1579 DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1580 host->host_no, pun, lun));
1581 NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1582 } else if(sstat0 & PHASE_MISMATCH) {
1583 struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1584 (struct NCR_700_command_slot *)SCp->host_scribble;
1586 if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1587 /* It wants to reply to some part of
1588 * our message */
1589 #ifdef NCR_700_DEBUG
1590 __u32 temp = NCR_700_readl(host, TEMP_REG);
1591 int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1592 printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1593 #endif
1594 resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1595 } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1596 dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1597 int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1598 int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1599 int residual = NCR_700_data_residual(host);
1600 int i;
1601 #ifdef NCR_700_DEBUG
1602 __u32 naddr = NCR_700_readl(host, DNAD_REG);
1604 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1605 host->host_no, pun, lun,
1606 SGcount, data_transfer);
1607 scsi_print_command(SCp);
1608 if(residual) {
1609 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1610 host->host_no, pun, lun,
1611 SGcount, data_transfer, residual);
1613 #endif
1614 data_transfer += residual;
1616 if(data_transfer != 0) {
1617 int count;
1618 __u32 pAddr;
1620 SGcount--;
1622 count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1623 DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1624 slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1625 slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1626 pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1627 pAddr += (count - data_transfer);
1628 #ifdef NCR_700_DEBUG
1629 if(pAddr != naddr) {
1630 printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1632 #endif
1633 slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1635 /* set the executed moves to nops */
1636 for(i=0; i<SGcount; i++) {
1637 slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1638 slot->SG[i].pAddr = 0;
1640 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
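                        /* Worked example (illustrative): if the interrupted
                         * SG move originally covered 0x1000 bytes and 0x400
                         * remain (data_transfer after adding the residual),
                         * the instruction above is rewritten to move 0x400
                         * bytes starting at pAddr + 0xc00, and every fully
                         * completed move before it becomes a NOP. */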
1641 /* and pretend we disconnected after
1642 * the command phase */
1643 resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1644 /* make sure all the data is flushed */
1645 NCR_700_flush_fifo(host);
1646 } else {
1647 __u8 sbcl = NCR_700_readb(host, SBCL_REG);
1648 printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1649 host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1650 NCR_700_internal_bus_reset(host);
1653 } else if(sstat0 & SCSI_GROSS_ERROR) {
1654 printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1655 host->host_no, pun, lun);
1656 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1657 } else if(sstat0 & PARITY_ERROR) {
1658 printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1659 host->host_no, pun, lun);
1660 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1661 } else if(dstat & SCRIPT_INT_RECEIVED) {
1662 DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1663 host->host_no, pun, lun));
1664 resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1665 } else if(dstat & (ILGL_INST_DETECTED)) {
1666 printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1667 " Please email James.Bottomley@HansenPartnership.com with the details\n",
1668 host->host_no, pun, lun,
1669 dsp, dsp - hostdata->pScript);
1670 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1671 } else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1672 printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1673 host->host_no, pun, lun, dstat);
1674 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1675 }
1678 /* NOTE: selection interrupt processing MUST occur
1679 * after script interrupt processing to correctly cope
1680 * with the case where we process a disconnect and
1681 * then get reselected before we process the
1682 * disconnection */
1683 if(sstat0 & SELECTED) {
1684 /* FIXME: It currently takes at least FOUR
1685 * interrupts to complete a command that
1686 * disconnects: one for the disconnect, one
1687 * for the reselection, one to get the
1688 * reselection data and one to complete the
1689 * command. If we guess the reselected
1690 * command here and prepare it, we only need
1691 * to get a reselection data interrupt if we
1692 * guessed wrongly. Since the interrupt
1693 * overhead is much greater than the command
1694 * setup, this would be an efficient
1695 * optimisation particularly as we probably
1696 * only have one outstanding command on a
1697 * target most of the time */
1699 resume_offset = process_selection(host, dsp);
1701 }
1702 }
1705 if(resume_offset) {
1706 if(hostdata->state != NCR_700_HOST_BUSY) {
1707 printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1708 host->host_no, resume_offset, resume_offset - hostdata->pScript);
1709 hostdata->state = NCR_700_HOST_BUSY;
1710 }
1712 DEBUG(("Attempting to resume at %x\n", resume_offset));
1713 NCR_700_clear_fifo(host);
1714 NCR_700_writel(resume_offset, host, DSP_REG);
1715 }
1716 /* There is probably a technical no-no about this: If we're a
1717 * shared interrupt and we got this interrupt because the
1718 * other device needs servicing not us, we're still going to
1719 * check our queued commands here---of course, there shouldn't
1720 * be any outstanding.... */
1721 if(hostdata->state == NCR_700_HOST_FREE) {
1722 int i;
1724 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1725 /* fairness: always run the queue from the last
1726 * position we left off */
1727 int j = (i + hostdata->saved_slot_position)
1728 % NCR_700_COMMAND_SLOTS_PER_HOST;
1730 if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1731 continue;
1732 if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1733 DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1734 host->host_no, &hostdata->slots[j],
1735 hostdata->slots[j].cmnd));
1736 hostdata->saved_slot_position = j + 1;
1737 }
1739 break;
1740 }
1741 }
1742 out_unlock:
1743 spin_unlock_irqrestore(host->host_lock, flags);
1744 return IRQ_RETVAL(handled);
1745 }
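/* Editorial note (not part of the driver): the scatter-gather fold-back in
 * the phase-mismatch path of NCR_700_intr above can be hard to follow from
 * the raw register reads alone. The sketch below restates just that
 * arithmetic with hypothetical, self-contained names (example_*);
 * 'not_transferred' stands for the DMA byte counter plus the FIFO residual,
 * i.e. the bytes the device did not take. It is illustrative only. */
struct example_sg_move {
	__u32 byte_count;	/* bytes this move instruction covers */
	__u32 addr;		/* bus address the move starts from */
};

/* Rewrite one SG move so that it covers only the untransferred tail,
 * mirroring how the slot->SG[SGcount] entry is patched above. */
static void example_fold_back(struct example_sg_move *sg, __u32 not_transferred)
{
	__u32 done = sg->byte_count - not_transferred;

	sg->addr += done;			/* skip what already moved */
	sg->byte_count = not_transferred;	/* only the tail remains */
}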
1747 static int
1748 NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1749 {
1750 struct NCR_700_Host_Parameters *hostdata =
1751 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1752 __u32 move_ins;
1753 enum dma_data_direction direction;
1754 struct NCR_700_command_slot *slot;
1756 if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1757 /* We're over our allocation, this should never happen
1758 * since we report the max allocation to the mid layer */
1759 printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1760 return 1;
1761 }
1762 /* check for untagged commands. We cannot have any outstanding
1763 * commands if we accept them. Commands could be untagged because:
1765 * - The tag negotiated bitmap is clear
1766 * - The blk layer sent an untagged command
1767 */
1768 if(NCR_700_get_depth(SCp->device) != 0
1769 && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1770 || !blk_rq_tagged(SCp->request))) {
1771 CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1772 NCR_700_get_depth(SCp->device));
1773 return SCSI_MLQUEUE_DEVICE_BUSY;
1774 }
1775 if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1776 CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1777 NCR_700_get_depth(SCp->device));
1778 return SCSI_MLQUEUE_DEVICE_BUSY;
1779 }
1780 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1782 /* begin the command here */
1783 /* no need to check for NULL, test for command_slot_count above
1784 * ensures a slot is free */
1785 slot = find_empty_slot(hostdata);
1787 slot->cmnd = SCp;
1789 SCp->scsi_done = done;
1790 SCp->host_scribble = (unsigned char *)slot;
1791 SCp->SCp.ptr = NULL;
1792 SCp->SCp.buffer = NULL;
1794 #ifdef NCR_700_DEBUG
1795 printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1796 scsi_print_command(SCp);
1797 #endif
1798 if(blk_rq_tagged(SCp->request)
1799 && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1800 && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1801 scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1802 hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1803 NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1804 }
1806 /* here we may have to process an untagged command. The gate
1807 * above ensures that this will be the only one outstanding,
1808 * so clear the tag negotiated bit.
1810 * FIXME: This will royally screw up on multiple LUN devices
1811 * */
1812 if(!blk_rq_tagged(SCp->request)
1813 && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1814 scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1815 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1816 }
1818 if((hostdata->tag_negotiated &(1<<scmd_id(SCp)))
1819 && scsi_get_tag_type(SCp->device)) {
1820 slot->tag = SCp->request->tag;
1821 CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1822 slot->tag, slot);
1823 } else {
1824 slot->tag = SCSI_NO_TAG;
1825 /* must populate current_cmnd for scsi_find_tag to work */
1826 SCp->device->current_cmnd = SCp;
1827 }
1828 /* sanity check: some of the commands generated by the mid-layer
1829 * have an eccentric idea of their sc_data_direction */
1830 if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
1831 SCp->sc_data_direction != DMA_NONE) {
1832 #ifdef NCR_700_DEBUG
1833 printk("53c700: Command");
1834 scsi_print_command(SCp);
1835 printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1836 #endif
1837 SCp->sc_data_direction = DMA_NONE;
1838 }
1840 switch (SCp->cmnd[0]) {
1841 case REQUEST_SENSE:
1842 /* clear the internal sense magic */
1843 SCp->cmnd[6] = 0;
1844 /* fall through */
1845 default:
1846 /* OK, get it from the command */
1847 switch(SCp->sc_data_direction) {
1848 case DMA_BIDIRECTIONAL:
1849 default:
1850 printk(KERN_ERR "53c700: Unknown command for data direction ");
1851 scsi_print_command(SCp);
1853 move_ins = 0;
1854 break;
1855 case DMA_NONE:
1856 move_ins = 0;
1857 break;
1858 case DMA_FROM_DEVICE:
1859 move_ins = SCRIPT_MOVE_DATA_IN;
1860 break;
1861 case DMA_TO_DEVICE:
1862 move_ins = SCRIPT_MOVE_DATA_OUT;
1863 break;
1864 }
1865 }
1867 /* now build the scatter gather list */
1868 direction = SCp->sc_data_direction;
1869 if(move_ins != 0) {
1870 int i;
1871 int sg_count;
1872 dma_addr_t vPtr = 0;
1873 struct scatterlist *sg;
1874 __u32 count = 0;
1876 sg_count = scsi_dma_map(SCp);
1877 BUG_ON(sg_count < 0);
1879 scsi_for_each_sg(SCp, sg, sg_count, i) {
1880 vPtr = sg_dma_address(sg);
1881 count = sg_dma_len(sg);
1883 slot->SG[i].ins = bS_to_host(move_ins | count);
1884 DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1885 i, count, slot->SG[i].ins, (unsigned long)vPtr));
1886 slot->SG[i].pAddr = bS_to_host(vPtr);
1887 }
1888 slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1889 slot->SG[i].pAddr = 0;
1890 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1891 DEBUG((" SETTING %08lx to %x\n",
1892 (&slot->pSG[i].ins),
1893 slot->SG[i].ins));
1894 }
1895 slot->resume_offset = 0;
1896 slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1897 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1898 NCR_700_start_command(SCp);
1899 return 0;
1900 }
1902 STATIC DEF_SCSI_QCMD(NCR_700_queuecommand)
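/* Editorial note (not part of the driver): the nested switch in
 * NCR_700_queuecommand_lck above boils down to a direction-to-opcode
 * choice. A hedged restatement with hypothetical names (example_*),
 * reusing types the file already includes; the real script opcodes are
 * passed in rather than assumed. */
static __u32 example_move_opcode(enum dma_data_direction dir,
				 __u32 move_data_in, __u32 move_data_out)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		return move_data_in;	/* device -> memory */
	case DMA_TO_DEVICE:
		return move_data_out;	/* memory -> device */
	case DMA_NONE:
	default:
		/* DMA_BIDIRECTIONAL and friends: no data move is scripted,
		 * mirroring the move_ins = 0 cases above */
		return 0;
	}
}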
1904 STATIC int
1905 NCR_700_abort(struct scsi_cmnd * SCp)
1907 struct NCR_700_command_slot *slot;
1909 scmd_printk(KERN_INFO, SCp,
1910 "New error handler wants to abort command\n\t");
1911 scsi_print_command(SCp);
1913 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1915 if(slot == NULL)
1916 /* no outstanding command to abort */
1917 return SUCCESS;
1918 if(SCp->cmnd[0] == TEST_UNIT_READY) {
1919 /* FIXME: This is because of a problem in the new
1920 * error handler. When it is in error recovery, it
1921 * will send a TUR to a device it thinks may still be
1922 * showing a problem. If the TUR isn't responded to,
1923 * it will abort it and mark the device off line.
1924 * Unfortunately, it does no other error recovery, so
1925 * this would leave us with an outstanding command
1926 * occupying a slot. Rather than allow this to
1927 * happen, we issue a bus reset to force all
1928 * outstanding commands to terminate here. */
1929 NCR_700_internal_bus_reset(SCp->device->host);
1930 /* still drop through and return failed */
1931 }
1932 return FAILED;
1936 STATIC int
1937 NCR_700_bus_reset(struct scsi_cmnd * SCp)
1938 {
1939 DECLARE_COMPLETION_ONSTACK(complete);
1940 struct NCR_700_Host_Parameters *hostdata =
1941 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1943 scmd_printk(KERN_INFO, SCp,
1944 "New error handler wants BUS reset, cmd %p\n\t", SCp);
1945 scsi_print_command(SCp);
1947 /* In theory, eh_complete should always be null because the
1948 * eh is single threaded, but just in case we're handling a
1949 * reset via sg or something */
1950 spin_lock_irq(SCp->device->host->host_lock);
1951 while (hostdata->eh_complete != NULL) {
1952 spin_unlock_irq(SCp->device->host->host_lock);
1953 msleep_interruptible(100);
1954 spin_lock_irq(SCp->device->host->host_lock);
1955 }
1957 hostdata->eh_complete = &complete;
1958 NCR_700_internal_bus_reset(SCp->device->host);
1960 spin_unlock_irq(SCp->device->host->host_lock);
1961 wait_for_completion(&complete);
1962 spin_lock_irq(SCp->device->host->host_lock);
1964 hostdata->eh_complete = NULL;
1965 /* Revalidate the transport parameters of the failing device */
1966 if(hostdata->fast)
1967 spi_schedule_dv_device(SCp->device);
1969 spin_unlock_irq(SCp->device->host->host_lock);
1970 return SUCCESS;
1971 }
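/* Editorial note (not part of the driver): the eh_complete handshake in
 * NCR_700_bus_reset above follows the usual completion pattern -- the
 * resetting thread parks on an on-stack completion and the interrupt path
 * (not shown in this part of the file) is expected to call complete() on
 * it once the reset has been observed. A minimal sketch with hypothetical
 * names (example_*), assuming the same locking discipline as above: */
static struct completion *example_pending_reset;	/* protected by the host lock */

static void example_wait_for_bus_reset(spinlock_t *lock)
{
	DECLARE_COMPLETION_ONSTACK(done);

	spin_lock_irq(lock);
	example_pending_reset = &done;	/* interrupt path will complete() this */
	/* ... trigger the reset while still holding the lock ... */
	spin_unlock_irq(lock);

	wait_for_completion(&done);

	spin_lock_irq(lock);
	example_pending_reset = NULL;
	spin_unlock_irq(lock);
}

/* Called from the interrupt path when the reset interrupt is seen. */
static void example_bus_reset_seen(void)
{
	if (example_pending_reset)
		complete(example_pending_reset);
}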
1973 STATIC int
1974 NCR_700_host_reset(struct scsi_cmnd * SCp)
1976 scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
1977 scsi_print_command(SCp);
1979 spin_lock_irq(SCp->device->host->host_lock);
1981 NCR_700_internal_bus_reset(SCp->device->host);
1982 NCR_700_chip_reset(SCp->device->host);
1984 spin_unlock_irq(SCp->device->host->host_lock);
1986 return SUCCESS;
1989 STATIC void
1990 NCR_700_set_period(struct scsi_target *STp, int period)
1992 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1993 struct NCR_700_Host_Parameters *hostdata =
1994 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
1996 if(!hostdata->fast)
1997 return;
1999 if(period < hostdata->min_period)
2000 period = hostdata->min_period;
2002 spi_period(STp) = period;
2003 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2004 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2005 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2008 STATIC void
2009 NCR_700_set_offset(struct scsi_target *STp, int offset)
2011 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2012 struct NCR_700_Host_Parameters *hostdata =
2013 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2014 int max_offset = hostdata->chip710
2015 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2017 if(!hostdata->fast)
2018 return;
2020 if(offset > max_offset)
2021 offset = max_offset;
2023 /* if we're currently async, make sure the period is reasonable */
2024 if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2025 spi_period(STp) > 0xff))
2026 spi_period(STp) = hostdata->min_period;
2028 spi_offset(STp) = offset;
2029 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2030 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2031 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2034 STATIC int
2035 NCR_700_slave_alloc(struct scsi_device *SDp)
2037 SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2038 GFP_KERNEL);
2040 if (!SDp->hostdata)
2041 return -ENOMEM;
2043 return 0;
2046 STATIC int
2047 NCR_700_slave_configure(struct scsi_device *SDp)
2049 struct NCR_700_Host_Parameters *hostdata =
2050 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2052 /* to do here: allocate memory; build a queue_full list */
2053 if(SDp->tagged_supported) {
2054 scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
2055 scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
2056 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2057 } else {
2058 /* initialise to default depth */
2059 scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
2060 }
2061 if(hostdata->fast) {
2062 /* Find the correct offset and period via domain validation */
2063 if (!spi_initial_dv(SDp->sdev_target))
2064 spi_dv_device(SDp);
2065 } else {
2066 spi_offset(SDp->sdev_target) = 0;
2067 spi_period(SDp->sdev_target) = 0;
2068 }
2069 return 0;
2072 STATIC void
2073 NCR_700_slave_destroy(struct scsi_device *SDp)
2075 kfree(SDp->hostdata);
2076 SDp->hostdata = NULL;
2079 static int
2080 NCR_700_change_queue_depth(struct scsi_device *SDp, int depth, int reason)
2082 if (reason != SCSI_QDEPTH_DEFAULT)
2083 return -EOPNOTSUPP;
2085 if (depth > NCR_700_MAX_TAGS)
2086 depth = NCR_700_MAX_TAGS;
2088 scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
2089 return depth;
2092 static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2093 {
2094 int change_tag = ((tag_type ==0 && scsi_get_tag_type(SDp) != 0)
2095 || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
2096 struct NCR_700_Host_Parameters *hostdata =
2097 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2099 scsi_set_tag_type(SDp, tag_type);
2101 /* We have a global (per target) flag to track whether TCQ is
2102 * enabled, so we'll be turning it off for the entire target here.
2103 * Our tag algorithm will fail if we mix tagged and untagged commands,
2104 * so quiesce the device before doing this */
2105 if (change_tag)
2106 scsi_target_quiesce(SDp->sdev_target);
2108 if (!tag_type) {
2109 /* shift back to the default unqueued number of commands
2110 * (the user can still raise this) */
2111 scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
2112 hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
2113 } else {
2114 /* Here, we cleared the negotiation flag above, so this
2115 * will force the driver to renegotiate */
2116 scsi_activate_tcq(SDp, SDp->queue_depth);
2117 if (change_tag)
2118 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2119 }
2120 if (change_tag)
2121 scsi_target_resume(SDp->sdev_target);
2123 return tag_type;
2124 }
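/* Editorial note (not part of the driver): tag_negotiated, tested in
 * NCR_700_queuecommand_lck and cleared in NCR_700_change_queue_type above,
 * is a one-bit-per-target-ID bitmap. A small sketch of that pattern with
 * hypothetical helpers (example_*); illustrative only, never called. */
static inline int example_tcq_negotiated(unsigned int bitmap, int target_id)
{
	return (bitmap & (1U << target_id)) != 0;
}

static inline unsigned int
example_tcq_mark(unsigned int bitmap, int target_id, int negotiated)
{
	if (negotiated)
		return bitmap | (1U << target_id);
	return bitmap & ~(1U << target_id);
}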
2126 static ssize_t
2127 NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2129 struct scsi_device *SDp = to_scsi_device(dev);
2131 return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2134 static struct device_attribute NCR_700_active_tags_attr = {
2135 .attr = {
2136 .name = "active_tags",
2137 .mode = S_IRUGO,
2138 },
2139 .show = NCR_700_show_active_tags,
2140 };
2142 STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2143 &NCR_700_active_tags_attr,
2144 NULL,
2145 };
2147 EXPORT_SYMBOL(NCR_700_detect);
2148 EXPORT_SYMBOL(NCR_700_release);
2149 EXPORT_SYMBOL(NCR_700_intr);
2151 static struct spi_function_template NCR_700_transport_functions = {
2152 .set_period = NCR_700_set_period,
2153 .show_period = 1,
2154 .set_offset = NCR_700_set_offset,
2155 .show_offset = 1,
2156 };
2158 static int __init NCR_700_init(void)
2160 NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2161 if(!NCR_700_transport_template)
2162 return -ENODEV;
2163 return 0;
2166 static void __exit NCR_700_exit(void)
2168 spi_release_transport(NCR_700_transport_template);
2171 module_init(NCR_700_init);
2172 module_exit(NCR_700_exit);