/* -*- mode: c; c-basic-offset: 8 -*- */

/* NCR (or Symbios) 53c700 and 53c700-66 Driver
 *
 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
**-----------------------------------------------------------------------------
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
 */
/* Notes:
 *
 * This driver is designed exclusively for these chips (virtually the
 * earliest of the scripts engine chips).  They need their own drivers
 * because they are missing so many of the scripts and snazzy register
 * features of their elder brothers (the 710, 720 and 770).
 *
 * The 700 is the lowliest of the line, it can only do async SCSI.
 * The 700-66 can at least do synchronous SCSI up to 10MHz.
 *
 * The 700 chip has no host bus interface logic of its own.  However,
 * it is usually mapped to a location with well defined register
 * offsets.  Therefore, if you can determine the base address and the
 * irq your board incorporating this chip uses, you can probably use
 * this driver to run it (although you'll probably have to write a
 * minimal wrapper for the purpose---see the NCR_D700 driver for
 * details about how to do this).
 *
 * TODO List:
 *
 * 1. Better statistics in the proc fs
 *
 * 2. Implement message queue (queues SCSI messages like commands) and make
 *    the abort and device reset functions use them.
 */
/* CHANGELOG
 *
 * Fixed bad bug affecting tag starvation processing (previously the
 * driver would hang the system if too many tags starved.  Also fixed
 * bad bug having to do with 10 byte command processing and REQUEST
 * SENSE (the command would loop forever getting a transfer length
 * mismatch in the CMD phase).
 *
 * Fixed scripts problem which caused certain devices (notably CDRWs)
 * to hang on initial INQUIRY.  Updated NCR_700_readl/writel to use
 * __raw_readl/writel for parisc compatibility (Thomas Bogendoerfer).
 * Added missing SCp->request_bufflen initialisation for sense requests
 * (Ryan Bradetich).
 *
 * Following test of the 64 bit parisc kernel by Richard Hirst,
 * several problems have now been corrected.  Also adds support for
 * consistent memory allocation.
 *
 * More compatibility changes for 710 (now actually works).  Enhanced
 * support for odd clock speeds which constrain SDTR negotiations.
 * Correct cacheline separation for scsi messages and status for
 * incoherent architectures.  Use of the pci mapping functions on
 * buffers to begin support for 64 bit drivers.
 *
 * Added support for the 53c710 chip (in 53c700 emulation mode only---no
 * special 53c710 instructions or registers are used).
 *
 * More endianness/cache coherency changes.
 *
 * Better bad device handling (handles devices lying about tag
 * queueing support and devices which fail to provide sense data on
 * contingent allegiance conditions).
 *
 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
 * debugging this driver on the parisc architecture and suggesting
 * many improvements and bug fixes.
 *
 * Thanks also go to Linuxcare Inc. for providing several PARISC
 * machines for me to debug the driver on.
 *
 * Made the driver mem or io mapped; added endian invariance; added
 * dma cache flushing operations for architectures which need it;
 * added support for more varied clocking speeds.
 *
 * Initial modularisation from the D700.  See NCR_D700.c for the rest of
 * the changelog.
 */
#define NCR_700_VERSION "2.8"
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/byteorder.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_spi.h>

#include "53c700.h"
/* NOTE: For 64 bit drivers there are points in the code where we use
 * a non dereferenceable pointer to point to a structure in dma-able
 * memory (which is 32 bits) so that we can use all of the structure
 * operations but take the address at the end.  This macro allows us
 * to truncate the 64 bit pointer down to 32 bits without the compiler
 * complaining */
#define to32bit(x)	((__u32)((unsigned long)(x)))
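/* Illustrative use (a sketch mirroring the real calls later in this
 * file, not additional driver logic): the command slots live in the
 * 32 bit dma-able region, so a slot member can be addressed through
 * the normal 64 bit kernel pointer and only the final value truncated:
 *
 *	struct NCR_700_command_slot *slot;
 *	__u32 sg = to32bit(&slot->pSG[0].ins);
 *	script_patch_32_abs(hostdata->dev, hostdata->script,
 *			    SGScriptStartAddress, sg);
 */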
#define STATIC static

MODULE_AUTHOR("James Bottomley");
MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
MODULE_LICENSE("GPL");

/* This is the script */
#include "53c700_d.h"
STATIC int NCR_700_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);

STATIC struct device_attribute *NCR_700_dev_attrs[];

STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
static char *NCR_700_phase[] = {
	"",
	"after selection",
	"before command phase",
	"after command phase",
	"after status phase",
	"after data in phase",
	"after data out phase",
	"during data phase",
};

static char *NCR_700_condition[] = {
	"",
	"NOT MSG_OUT",
	"UNEXPECTED PHASE",
	"NOT MSG_IN",
	"UNEXPECTED MSG",
	"MSG_IN",
	"SDTR_MSG RECEIVED",
	"REJECT_MSG RECEIVED",
	"DISCONNECT_MSG RECEIVED",
	"MSG_OUT",
	"DATA_IN",
};

static char *NCR_700_fatal_messages[] = {
	"unexpected message after reselection",
	"still MSG_OUT after message injection",
	"not MSG_IN after selection",
	"Illegal message length received",
};

static char *NCR_700_SBCL_bits[] = {
	"IO",
	"CD",
	"MSG",
	"ATN",
	"SEL",
	"BSY",
	"ACK",
	"REQ",
};

static char *NCR_700_SBCL_to_phase[] = {
	"DATA_OUT",
	"DATA_IN",
	"CMD_OUT",
	"STATE",
	"ILLEGAL PHASE",
	"ILLEGAL PHASE",
	"MSG OUT",
	"MSG IN",
};
/* This translates the SDTR message offset and period to a value
 * which can be loaded into the SXFER_REG.
 *
 * NOTE: According to SCSI-2, the true transfer period (in ns) is
 * actually four times this period value */
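/* Worked example (illustrative numbers only; whether the result then
 * clears the chip's minimum-XFERP clamp depends on the constants in
 * 53c700.h): for a 50MHz synchronous clock, an agreed period of 50
 * (i.e. a 200ns true period) and an offset of 8, the code below gives
 *
 *	XFERP = (50*4 * 50)/1000 - 4 = 6
 *	sxfer = (8 & 0x0f) | ((6 & 0x07) << 4) = 0x68
 *
 * and 0x68 is what would be written to SXFER_REG for that device. */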
STATIC inline __u8
NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
			       __u8 offset, __u8 period)
{
	__u8 XFERP;

	__u8 min_xferp = (hostdata->chip710
			  ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
	__u8 max_offset = (hostdata->chip710
			   ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);

	if(period < hostdata->min_period) {
		printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, NCR_700_MIN_PERIOD*4);
		period = hostdata->min_period;
	}
	XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
	if(offset > max_offset) {
		printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
		       offset, max_offset);
		offset = max_offset;
	}
	if(XFERP < min_xferp) {
		printk(KERN_WARNING "53c700: XFERP %d is less than minimum, setting to %d\n",
		       XFERP, min_xferp);
		XFERP = min_xferp;
	}
	return (offset & 0x0f) | (XFERP & 0x07)<<4;
}
STATIC inline __u8
NCR_700_get_SXFER(struct scsi_device *SDp)
{
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];

	return NCR_700_offset_period_to_sxfer(hostdata,
					      spi_offset(SDp->sdev_target),
					      spi_period(SDp->sdev_target));
}
struct Scsi_Host *
NCR_700_detect(struct scsi_host_template *tpnt,
	       struct NCR_700_Host_Parameters *hostdata, struct device *dev)
{
	dma_addr_t pScript, pSlots;
	void *memory;
	__u32 *script;
	struct Scsi_Host *host;
	static int banner = 0;
	int j;

	if(tpnt->sdev_attrs == NULL)
		tpnt->sdev_attrs = NCR_700_dev_attrs;

	memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
				       &pScript, GFP_KERNEL);
	if(memory == NULL) {
		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
		return NULL;
	}

	script = (__u32 *)memory;
	hostdata->msgin = memory + MSGIN_OFFSET;
	hostdata->msgout = memory + MSGOUT_OFFSET;
	hostdata->status = memory + STATUS_OFFSET;
	/* all of these offsets are L1_CACHE_BYTES separated.  It is fatal
	 * if this isn't sufficient separation to avoid dma flushing issues */
	BUG_ON(!dma_is_consistent(hostdata->dev, pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
	hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);

	pSlots = pScript + SLOTS_OFFSET;

	/* Fill in the missing routines from the host template */
	tpnt->queuecommand = NCR_700_queuecommand;
	tpnt->eh_abort_handler = NCR_700_abort;
	tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
	tpnt->eh_host_reset_handler = NCR_700_host_reset;
	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
	tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
	tpnt->use_clustering = ENABLE_CLUSTERING;
	tpnt->slave_configure = NCR_700_slave_configure;
	tpnt->slave_destroy = NCR_700_slave_destroy;
	tpnt->slave_alloc = NCR_700_slave_alloc;
	tpnt->change_queue_depth = NCR_700_change_queue_depth;
	tpnt->change_queue_type = NCR_700_change_queue_type;

	if(tpnt->name == NULL)
		tpnt->name = "53c700";
	if(tpnt->proc_name == NULL)
		tpnt->proc_name = "53c700";

	host = scsi_host_alloc(tpnt, 4);
	if (!host)
		return NULL;
	memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
	       * NCR_700_COMMAND_SLOTS_PER_HOST);
	for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
		dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
						 - (unsigned long)&hostdata->slots[0].SG[0]);
		hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
		if(j == 0)
			hostdata->free_list = &hostdata->slots[j];
		else
			hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
		hostdata->slots[j].state = NCR_700_SLOT_FREE;
	}

	for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
		script[j] = bS_to_host(SCRIPT[j]);

	/* adjust all labels to be bus physical */
	for (j = 0; j < PATCHES; j++)
		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
	/* now patch up fixed addresses. */
	script_patch_32(hostdata->dev, script, MessageLocation,
			pScript + MSGOUT_OFFSET);
	script_patch_32(hostdata->dev, script, StatusAddress,
			pScript + STATUS_OFFSET);
	script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
			pScript + MSGIN_OFFSET);

	hostdata->script = script;
	hostdata->pScript = pScript;
	dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
	hostdata->state = NCR_700_HOST_FREE;
	hostdata->cmd = NULL;

	host->max_lun = NCR_700_MAX_LUNS;
	BUG_ON(NCR_700_transport_template == NULL);
	host->transportt = NCR_700_transport_template;
	host->unique_id = (unsigned long)hostdata->base;
	hostdata->eh_complete = NULL;
	host->hostdata[0] = (unsigned long)hostdata;

	NCR_700_writeb(0xff, host, CTEST9_REG);
	if (hostdata->chip710)
		hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
	else
		hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
	hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
	if (banner == 0) {
		printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
		banner = 1;
	}
	printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
	       hostdata->chip710 ? "53c710" :
	       (hostdata->fast ? "53c700-66" : "53c700"),
	       hostdata->rev, hostdata->differential ?
	       "(Differential)" : "");

	NCR_700_chip_reset(host);

	if (scsi_add_host(host, dev)) {
		dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
		scsi_host_put(host);
		return NULL;
	}

	spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
		SPI_SIGNAL_SE;

	return host;
}
int
NCR_700_release(struct Scsi_Host *host)
{
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)host->hostdata[0];

	dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
			     hostdata->script, hostdata->pScript);
	return 1;
}
STATIC inline __u8
NCR_700_identify(int can_disconnect, __u8 lun)
{
	return IDENTIFY_BASE |
		((can_disconnect) ? 0x40 : 0) |
		(lun & NCR_700_LUN_MASK);
}
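/* Example (illustrative, assuming the usual IDENTIFY_BASE of 0x80 from
 * scsi.h and a LUN mask covering at least three bits):
 * NCR_700_identify(1, 2) builds 0x80 | 0x40 | 0x02 = 0xc2, i.e. an
 * IDENTIFY message for LUN 2 with disconnect privilege granted. */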
/*
 * Function : static int data_residual (Scsi_Host *host)
 *
 * Purpose : return residual data count of what's in the chip.  If you
 *	really want to know what this function is doing, it's almost a
 *	direct transcription of the algorithm described in the 53c710
 *	guide, except that the DBC and DFIFO registers are only 6 bits
 *	wide on a 53c700.
 *
 * Inputs : host - SCSI host */
STATIC int
NCR_700_data_residual (struct Scsi_Host *host) {
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)host->hostdata[0];
	int count, synchronous = 0;
	unsigned int ddir;

	if(hostdata->chip710) {
		count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
			 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
	} else {
		count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
			 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
	}

	if(hostdata->fast)
		synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;

	/* get the data direction */
	ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;

	if (ddir) {
		/* Receive */
		if (synchronous)
			count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
		else
			if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
				++count;
	} else {
		/* Send */
		__u8 sstat = NCR_700_readb(host, SSTAT1_REG);
		if (sstat & SODL_REG_FULL)
			++count;
		if (synchronous && (sstat & SODR_REG_FULL))
			++count;
	}

	if(count)
		printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);

	return count;
}
/* print out the SCSI wires and corresponding phase from the SBCL register
 * in the chip */
static inline char *
sbcl_to_string(__u8 sbcl)
{
	int i;
	static char ret[256];

	ret[0] = '\0';
	for(i = 0; i < 8; i++) {
		if(sbcl & (1<<i))
			strcat(ret, NCR_700_SBCL_bits[i]);
	}
	strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
	return ret;
}
static inline __u8
bitmap_to_number(__u8 bitmap)
{
	__u8 i;

	for(i=0; i<8 && !(bitmap&(1<<i)); i++)
		;
	return i;
}
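/* e.g. bitmap_to_number(0x20) returns 5: the chip reports a (re)selecting
 * ID as a single set bit in SFBR/CTEST9, and this converts that bit back
 * into the numeric SCSI ID (illustrative note, not extra driver logic). */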
/* Pull a slot off the free list */
STATIC struct NCR_700_command_slot *
find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
{
	struct NCR_700_command_slot *slot = hostdata->free_list;

	if(slot == NULL) {
		/* sanity check */
		if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
			printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
		return NULL;
	}

	if(slot->state != NCR_700_SLOT_FREE)
		/* should never happen */
		printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");

	hostdata->free_list = slot->ITL_forw;
	slot->ITL_forw = NULL;

	/* NOTE: set the state to busy here, not queued, since this
	 * indicates the slot is in use and cannot be run by the IRQ
	 * finish routine.  If we cannot queue the command when it
	 * is properly built, we then change to NCR_700_SLOT_QUEUED */
	slot->state = NCR_700_SLOT_BUSY;
	hostdata->command_slot_count++;

	return slot;
}
STATIC void
free_slot(struct NCR_700_command_slot *slot,
	  struct NCR_700_Host_Parameters *hostdata)
{
	if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
		printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
	}
	if(slot->state == NCR_700_SLOT_FREE) {
		printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
	}

	slot->resume_offset = 0;
	slot->state = NCR_700_SLOT_FREE;
	slot->ITL_forw = hostdata->free_list;
	hostdata->free_list = slot;
	hostdata->command_slot_count--;
}
/* This routine really does very little.  The command is indexed on
   the ITL and (if tagged) the ITLQ lists in _queuecommand */
STATIC void
save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
		     struct scsi_cmnd *SCp, __u32 dsp)
{
	/* It's just possible that this gets executed twice */
	if(SCp != NULL) {
		struct NCR_700_command_slot *slot =
			(struct NCR_700_command_slot *)SCp->host_scribble;

		slot->resume_offset = dsp;
	}
	hostdata->state = NCR_700_HOST_FREE;
	hostdata->cmd = NULL;
}
STATIC void
NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
	      struct NCR_700_command_slot *slot)
{
	if(SCp->sc_data_direction != DMA_NONE &&
	   SCp->sc_data_direction != DMA_BIDIRECTIONAL) {
		if(SCp->use_sg)
			dma_unmap_sg(hostdata->dev, SCp->request_buffer,
				     SCp->use_sg, SCp->sc_data_direction);
		else
			dma_unmap_single(hostdata->dev, slot->dma_handle,
					 SCp->request_bufflen,
					 SCp->sc_data_direction);
	}
}
STATIC void
NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
		  struct scsi_cmnd *SCp, int result)
{
	hostdata->state = NCR_700_HOST_FREE;
	hostdata->cmd = NULL;

	if(SCp != NULL) {
		struct NCR_700_command_slot *slot =
			(struct NCR_700_command_slot *)SCp->host_scribble;

		dma_unmap_single(hostdata->dev, slot->pCmd,
				 sizeof(SCp->cmnd), DMA_TO_DEVICE);
		if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
			char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
#ifdef NCR_700_DEBUG
			printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
			       SCp, SCp->cmnd[7], result);
			scsi_print_sense("53c700", SCp);
#endif
			dma_unmap_single(hostdata->dev, slot->dma_handle, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
			/* restore the old result if the request sense was
			 * successful */
			if (result == 0)
				result = cmnd[7];
			/* restore the original length */
			SCp->cmd_len = cmnd[8];
		} else
			NCR_700_unmap(hostdata, SCp, slot);

		free_slot(slot, hostdata);
#ifdef NCR_700_DEBUG
		if(NCR_700_get_depth(SCp->device) == 0 ||
		   NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
			printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
			       NCR_700_get_depth(SCp->device));
#endif /* NCR_700_DEBUG */
		NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);

		SCp->host_scribble = NULL;
		SCp->result = result;
		SCp->scsi_done(SCp);
	} else {
		printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
	}
}
STATIC void
NCR_700_internal_bus_reset(struct Scsi_Host *host)
{
	/* Bus reset */
	NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
	udelay(50);
	NCR_700_writeb(0, host, SCNTL1_REG);
}
661 NCR_700_chip_setup(struct Scsi_Host
*host
)
663 struct NCR_700_Host_Parameters
*hostdata
=
664 (struct NCR_700_Host_Parameters
*)host
->hostdata
[0];
665 __u32 dcntl_extra
= 0;
667 __u8 min_xferp
= (hostdata
->chip710
? NCR_710_MIN_XFERP
: NCR_700_MIN_XFERP
);
669 if(hostdata
->chip710
) {
670 __u8 burst_disable
= 0;
671 __u8 burst_length
= 0;
673 switch (hostdata
->burst_length
) {
675 burst_length
= BURST_LENGTH_1
;
678 burst_length
= BURST_LENGTH_2
;
681 burst_length
= BURST_LENGTH_4
;
684 burst_length
= BURST_LENGTH_8
;
687 burst_disable
= BURST_DISABLE
;
690 dcntl_extra
= COMPAT_700_MODE
;
692 NCR_700_writeb(dcntl_extra
, host
, DCNTL_REG
);
693 NCR_700_writeb(burst_length
| hostdata
->dmode_extra
,
694 host
, DMODE_710_REG
);
695 NCR_700_writeb(burst_disable
| (hostdata
->differential
?
696 DIFF
: 0), host
, CTEST7_REG
);
697 NCR_700_writeb(BTB_TIMER_DISABLE
, host
, CTEST0_REG
);
698 NCR_700_writeb(FULL_ARBITRATION
| ENABLE_PARITY
| PARITY
699 | AUTO_ATN
, host
, SCNTL0_REG
);
701 NCR_700_writeb(BURST_LENGTH_8
| hostdata
->dmode_extra
,
702 host
, DMODE_700_REG
);
703 NCR_700_writeb(hostdata
->differential
?
704 DIFF
: 0, host
, CTEST7_REG
);
706 /* this is for 700-66, does nothing on 700 */
707 NCR_700_writeb(LAST_DIS_ENBL
| ENABLE_ACTIVE_NEGATION
708 | GENERATE_RECEIVE_PARITY
, host
,
711 NCR_700_writeb(FULL_ARBITRATION
| ENABLE_PARITY
712 | PARITY
| AUTO_ATN
, host
, SCNTL0_REG
);
716 NCR_700_writeb(1 << host
->this_id
, host
, SCID_REG
);
717 NCR_700_writeb(0, host
, SBCL_REG
);
718 NCR_700_writeb(ASYNC_OPERATION
, host
, SXFER_REG
);
720 NCR_700_writeb(PHASE_MM_INT
| SEL_TIMEOUT_INT
| GROSS_ERR_INT
| UX_DISC_INT
721 | RST_INT
| PAR_ERR_INT
| SELECT_INT
, host
, SIEN_REG
);
723 NCR_700_writeb(ABORT_INT
| INT_INST_INT
| ILGL_INST_INT
, host
, DIEN_REG
);
724 NCR_700_writeb(ENABLE_SELECT
, host
, SCNTL1_REG
);
	if(hostdata->clock > 75) {
		printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
727 /* do the best we can, but the async clock will be out
728 * of spec: sync divider 2, async divider 3 */
729 DEBUG(("53c700: sync 2 async 3\n"));
730 NCR_700_writeb(SYNC_DIV_2_0
, host
, SBCL_REG
);
731 NCR_700_writeb(ASYNC_DIV_3_0
| dcntl_extra
, host
, DCNTL_REG
);
732 hostdata
->sync_clock
= hostdata
->clock
/2;
733 } else if(hostdata
->clock
> 50 && hostdata
->clock
<= 75) {
734 /* sync divider 1.5, async divider 3 */
735 DEBUG(("53c700: sync 1.5 async 3\n"));
736 NCR_700_writeb(SYNC_DIV_1_5
, host
, SBCL_REG
);
737 NCR_700_writeb(ASYNC_DIV_3_0
| dcntl_extra
, host
, DCNTL_REG
);
738 hostdata
->sync_clock
= hostdata
->clock
*2;
739 hostdata
->sync_clock
/= 3;
741 } else if(hostdata
->clock
> 37 && hostdata
->clock
<= 50) {
742 /* sync divider 1, async divider 2 */
743 DEBUG(("53c700: sync 1 async 2\n"));
744 NCR_700_writeb(SYNC_DIV_1_0
, host
, SBCL_REG
);
745 NCR_700_writeb(ASYNC_DIV_2_0
| dcntl_extra
, host
, DCNTL_REG
);
746 hostdata
->sync_clock
= hostdata
->clock
;
747 } else if(hostdata
->clock
> 25 && hostdata
->clock
<=37) {
748 /* sync divider 1, async divider 1.5 */
749 DEBUG(("53c700: sync 1 async 1.5\n"));
750 NCR_700_writeb(SYNC_DIV_1_0
, host
, SBCL_REG
);
751 NCR_700_writeb(ASYNC_DIV_1_5
| dcntl_extra
, host
, DCNTL_REG
);
752 hostdata
->sync_clock
= hostdata
->clock
;
754 DEBUG(("53c700: sync 1 async 1\n"));
755 NCR_700_writeb(SYNC_DIV_1_0
, host
, SBCL_REG
);
756 NCR_700_writeb(ASYNC_DIV_1_0
| dcntl_extra
, host
, DCNTL_REG
);
757 /* sync divider 1, async divider 1 */
758 hostdata
->sync_clock
= hostdata
->clock
;
	/* Calculate the actual minimum period that can be supported
	 * by our synchronous clock speed.  See the 710 manual for
	 * exact details of this calculation which is based on a
	 * setting of the SXFER register */
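	/* For instance (illustrative figures, assuming a min_xferp of 4):
	 * with a 40MHz synchronous clock the expression below gives
	 * 1000*(4+4)/(4*40) = 50, i.e. a minimum period value of 50,
	 * which is a 200ns true transfer period since the true period
	 * is four times the stored value (see the SDTR note above
	 * NCR_700_offset_period_to_sxfer). */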
	min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);

	hostdata->min_period = NCR_700_MIN_PERIOD;
	if(min_period > NCR_700_MIN_PERIOD)
		hostdata->min_period = min_period;
771 NCR_700_chip_reset(struct Scsi_Host
*host
)
773 struct NCR_700_Host_Parameters
*hostdata
=
774 (struct NCR_700_Host_Parameters
*)host
->hostdata
[0];
775 if(hostdata
->chip710
) {
776 NCR_700_writeb(SOFTWARE_RESET_710
, host
, ISTAT_REG
);
779 NCR_700_writeb(0, host
, ISTAT_REG
);
781 NCR_700_writeb(SOFTWARE_RESET
, host
, DCNTL_REG
);
784 NCR_700_writeb(0, host
, DCNTL_REG
);
789 NCR_700_chip_setup(host
);
792 /* The heart of the message processing engine is that the instruction
793 * immediately after the INT is the normal case (and so must be CLEAR
794 * ACK). If we want to do something else, we call that routine in
795 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
796 * ACK) so that the routine returns correctly to resume its activity
799 process_extended_message(struct Scsi_Host
*host
,
800 struct NCR_700_Host_Parameters
*hostdata
,
801 struct scsi_cmnd
*SCp
, __u32 dsp
, __u32 dsps
)
803 __u32 resume_offset
= dsp
, temp
= dsp
+ 8;
804 __u8 pun
= 0xff, lun
= 0xff;
807 pun
= SCp
->device
->id
;
808 lun
= SCp
->device
->lun
;
811 switch(hostdata
->msgin
[2]) {
813 if(SCp
!= NULL
&& NCR_700_is_flag_set(SCp
->device
, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION
)) {
814 struct scsi_target
*starget
= SCp
->device
->sdev_target
;
815 __u8 period
= hostdata
->msgin
[3];
816 __u8 offset
= hostdata
->msgin
[4];
818 if(offset
== 0 || period
== 0) {
823 spi_offset(starget
) = offset
;
824 spi_period(starget
) = period
;
826 if(NCR_700_is_flag_set(SCp
->device
, NCR_700_DEV_PRINT_SYNC_NEGOTIATION
)) {
827 spi_display_xfer_agreement(starget
);
828 NCR_700_clear_flag(SCp
->device
, NCR_700_DEV_PRINT_SYNC_NEGOTIATION
);
831 NCR_700_set_flag(SCp
->device
, NCR_700_DEV_NEGOTIATED_SYNC
);
832 NCR_700_clear_flag(SCp
->device
, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION
);
834 NCR_700_writeb(NCR_700_get_SXFER(SCp
->device
),
838 /* SDTR message out of the blue, reject it */
839 shost_printk(KERN_WARNING
, host
,
840 "Unexpected SDTR msg\n");
841 hostdata
->msgout
[0] = A_REJECT_MSG
;
842 dma_cache_sync(hostdata
->dev
, hostdata
->msgout
, 1, DMA_TO_DEVICE
);
843 script_patch_16(hostdata
->dev
, hostdata
->script
,
845 /* SendMsgOut returns, so set up the return
847 resume_offset
= hostdata
->pScript
+ Ent_SendMessageWithATN
;
852 printk(KERN_INFO
"scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
853 host
->host_no
, pun
, lun
);
854 hostdata
->msgout
[0] = A_REJECT_MSG
;
855 dma_cache_sync(hostdata
->dev
, hostdata
->msgout
, 1, DMA_TO_DEVICE
);
856 script_patch_16(hostdata
->dev
, hostdata
->script
, MessageCount
,
858 resume_offset
= hostdata
->pScript
+ Ent_SendMessageWithATN
;
863 printk(KERN_INFO
"scsi%d (%d:%d): Unexpected message %s: ",
864 host
->host_no
, pun
, lun
,
865 NCR_700_phase
[(dsps
& 0xf00) >> 8]);
866 spi_print_msg(hostdata
->msgin
);
869 hostdata
->msgout
[0] = A_REJECT_MSG
;
870 dma_cache_sync(hostdata
->dev
, hostdata
->msgout
, 1, DMA_TO_DEVICE
);
871 script_patch_16(hostdata
->dev
, hostdata
->script
, MessageCount
,
873 /* SendMsgOut returns, so set up the return
875 resume_offset
= hostdata
->pScript
+ Ent_SendMessageWithATN
;
877 NCR_700_writel(temp
, host
, TEMP_REG
);
878 return resume_offset
;
882 process_message(struct Scsi_Host
*host
, struct NCR_700_Host_Parameters
*hostdata
,
883 struct scsi_cmnd
*SCp
, __u32 dsp
, __u32 dsps
)
885 /* work out where to return to */
886 __u32 temp
= dsp
+ 8, resume_offset
= dsp
;
887 __u8 pun
= 0xff, lun
= 0xff;
890 pun
= SCp
->device
->id
;
891 lun
= SCp
->device
->lun
;
895 printk("scsi%d (%d:%d): message %s: ", host
->host_no
, pun
, lun
,
896 NCR_700_phase
[(dsps
& 0xf00) >> 8]);
897 spi_print_msg(hostdata
->msgin
);
901 switch(hostdata
->msgin
[0]) {
904 resume_offset
= process_extended_message(host
, hostdata
, SCp
,
909 if(SCp
!= NULL
&& NCR_700_is_flag_set(SCp
->device
, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION
)) {
910 /* Rejected our sync negotiation attempt */
911 spi_period(SCp
->device
->sdev_target
) =
912 spi_offset(SCp
->device
->sdev_target
) = 0;
913 NCR_700_set_flag(SCp
->device
, NCR_700_DEV_NEGOTIATED_SYNC
);
914 NCR_700_clear_flag(SCp
->device
, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION
);
915 } else if(SCp
!= NULL
&& NCR_700_get_tag_neg_state(SCp
->device
) == NCR_700_DURING_TAG_NEGOTIATION
) {
916 /* rejected our first simple tag message */
917 scmd_printk(KERN_WARNING
, SCp
,
918 "Rejected first tag queue attempt, turning off tag queueing\n");
919 /* we're done negotiating */
920 NCR_700_set_tag_neg_state(SCp
->device
, NCR_700_FINISHED_TAG_NEGOTIATION
);
921 hostdata
->tag_negotiated
&= ~(1<<scmd_id(SCp
));
922 SCp
->device
->tagged_supported
= 0;
923 scsi_deactivate_tcq(SCp
->device
, host
->cmd_per_lun
);
925 shost_printk(KERN_WARNING
, host
,
926 "(%d:%d) Unexpected REJECT Message %s\n",
928 NCR_700_phase
[(dsps
& 0xf00) >> 8]);
929 /* however, just ignore it */
933 case A_PARITY_ERROR_MSG
:
934 printk(KERN_ERR
"scsi%d (%d:%d) Parity Error!\n", host
->host_no
,
936 NCR_700_internal_bus_reset(host
);
938 case A_SIMPLE_TAG_MSG
:
939 printk(KERN_INFO
"scsi%d (%d:%d) SIMPLE TAG %d %s\n", host
->host_no
,
940 pun
, lun
, hostdata
->msgin
[1],
941 NCR_700_phase
[(dsps
& 0xf00) >> 8]);
945 printk(KERN_INFO
"scsi%d (%d:%d): Unexpected message %s: ",
946 host
->host_no
, pun
, lun
,
947 NCR_700_phase
[(dsps
& 0xf00) >> 8]);
949 spi_print_msg(hostdata
->msgin
);
952 hostdata
->msgout
[0] = A_REJECT_MSG
;
953 dma_cache_sync(hostdata
->dev
, hostdata
->msgout
, 1, DMA_TO_DEVICE
);
954 script_patch_16(hostdata
->dev
, hostdata
->script
, MessageCount
,
956 /* SendMsgOut returns, so set up the return
958 resume_offset
= hostdata
->pScript
+ Ent_SendMessageWithATN
;
962 NCR_700_writel(temp
, host
, TEMP_REG
);
963 /* set us up to receive another message */
964 dma_cache_sync(hostdata
->dev
, hostdata
->msgin
, MSG_ARRAY_SIZE
, DMA_FROM_DEVICE
);
965 return resume_offset
;
969 process_script_interrupt(__u32 dsps
, __u32 dsp
, struct scsi_cmnd
*SCp
,
970 struct Scsi_Host
*host
,
971 struct NCR_700_Host_Parameters
*hostdata
)
973 __u32 resume_offset
= 0;
974 __u8 pun
= 0xff, lun
=0xff;
977 pun
= SCp
->device
->id
;
978 lun
= SCp
->device
->lun
;
981 if(dsps
== A_GOOD_STATUS_AFTER_STATUS
) {
982 DEBUG((" COMMAND COMPLETE, status=%02x\n",
983 hostdata
->status
[0]));
984 /* OK, if TCQ still under negotiation, we now know it works */
985 if (NCR_700_get_tag_neg_state(SCp
->device
) == NCR_700_DURING_TAG_NEGOTIATION
)
986 NCR_700_set_tag_neg_state(SCp
->device
,
987 NCR_700_FINISHED_TAG_NEGOTIATION
);
		/* check for contingent allegiance conditions */
990 if(status_byte(hostdata
->status
[0]) == CHECK_CONDITION
||
991 status_byte(hostdata
->status
[0]) == COMMAND_TERMINATED
) {
992 struct NCR_700_command_slot
*slot
=
993 (struct NCR_700_command_slot
*)SCp
->host_scribble
;
994 if(slot
->flags
== NCR_700_FLAG_AUTOSENSE
) {
995 /* OOPS: bad device, returning another
996 * contingent allegiance condition */
997 scmd_printk(KERN_ERR
, SCp
,
998 "broken device is looping in contingent allegiance: ignoring\n");
999 NCR_700_scsi_done(hostdata
, SCp
, hostdata
->status
[0]);
1002 NCR_700_get_sense_cmnd(SCp
->device
);
1004 scsi_print_command(SCp
);
1005 printk(" cmd %p has status %d, requesting sense\n",
1006 SCp
, hostdata
->status
[0]);
1008 /* we can destroy the command here
1009 * because the contingent allegiance
1010 * condition will cause a retry which
1011 * will re-copy the command from the
1012 * saved data_cmnd. We also unmap any
1013 * data associated with the command
1015 NCR_700_unmap(hostdata
, SCp
, slot
);
1016 dma_unmap_single(hostdata
->dev
, slot
->pCmd
,
1020 cmnd
[0] = REQUEST_SENSE
;
1021 cmnd
[1] = (SCp
->device
->lun
& 0x7) << 5;
1024 cmnd
[4] = sizeof(SCp
->sense_buffer
);
1026 /* Here's a quiet hack: the
1027 * REQUEST_SENSE command is six bytes,
1028 * so store a flag indicating that
1029 * this was an internal sense request
1030 * and the original status at the end
1032 cmnd
[6] = NCR_700_INTERNAL_SENSE_MAGIC
;
1033 cmnd
[7] = hostdata
->status
[0];
1034 cmnd
[8] = SCp
->cmd_len
;
1035 SCp
->cmd_len
= 6; /* command length for
1037 slot
->pCmd
= dma_map_single(hostdata
->dev
, cmnd
, MAX_COMMAND_SIZE
, DMA_TO_DEVICE
);
1038 slot
->dma_handle
= dma_map_single(hostdata
->dev
, SCp
->sense_buffer
, sizeof(SCp
->sense_buffer
), DMA_FROM_DEVICE
);
1039 slot
->SG
[0].ins
= bS_to_host(SCRIPT_MOVE_DATA_IN
| sizeof(SCp
->sense_buffer
));
1040 slot
->SG
[0].pAddr
= bS_to_host(slot
->dma_handle
);
1041 slot
->SG
[1].ins
= bS_to_host(SCRIPT_RETURN
);
1042 slot
->SG
[1].pAddr
= 0;
1043 slot
->resume_offset
= hostdata
->pScript
;
1044 dma_cache_sync(hostdata
->dev
, slot
->SG
, sizeof(slot
->SG
[0])*2, DMA_TO_DEVICE
);
1045 dma_cache_sync(hostdata
->dev
, SCp
->sense_buffer
, sizeof(SCp
->sense_buffer
), DMA_FROM_DEVICE
);
1047 /* queue the command for reissue */
1048 slot
->state
= NCR_700_SLOT_QUEUED
;
1049 slot
->flags
= NCR_700_FLAG_AUTOSENSE
;
1050 hostdata
->state
= NCR_700_HOST_FREE
;
1051 hostdata
->cmd
= NULL
;
1054 // Currently rely on the mid layer evaluation
1055 // of the tag queuing capability
1057 //if(status_byte(hostdata->status[0]) == GOOD &&
1058 // SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1059 // /* Piggy back the tag queueing support
1060 // * on this command */
1061 // dma_sync_single_for_cpu(hostdata->dev,
1062 // slot->dma_handle,
1063 // SCp->request_bufflen,
1064 // DMA_FROM_DEVICE);
1065 // if(((char *)SCp->request_buffer)[7] & 0x02) {
1066 // scmd_printk(KERN_INFO, SCp,
1067 // "Enabling Tag Command Queuing\n");
1068 // hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1069 // NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1071 // NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1072 // hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1075 NCR_700_scsi_done(hostdata
, SCp
, hostdata
->status
[0]);
1077 } else if((dsps
& 0xfffff0f0) == A_UNEXPECTED_PHASE
) {
1078 __u8 i
= (dsps
& 0xf00) >> 8;
1080 scmd_printk(KERN_ERR
, SCp
, "UNEXPECTED PHASE %s (%s)\n",
1082 sbcl_to_string(NCR_700_readb(host
, SBCL_REG
)));
1083 scmd_printk(KERN_ERR
, SCp
, " len = %d, cmd =",
1085 scsi_print_command(SCp
);
1087 NCR_700_internal_bus_reset(host
);
1088 } else if((dsps
& 0xfffff000) == A_FATAL
) {
1089 int i
= (dsps
& 0xfff);
1091 printk(KERN_ERR
"scsi%d: (%d:%d) FATAL ERROR: %s\n",
1092 host
->host_no
, pun
, lun
, NCR_700_fatal_messages
[i
]);
1093 if(dsps
== A_FATAL_ILLEGAL_MSG_LENGTH
) {
1094 printk(KERN_ERR
" msg begins %02x %02x\n",
1095 hostdata
->msgin
[0], hostdata
->msgin
[1]);
1097 NCR_700_internal_bus_reset(host
);
1098 } else if((dsps
& 0xfffff0f0) == A_DISCONNECT
) {
1099 #ifdef NCR_700_DEBUG
1100 __u8 i
= (dsps
& 0xf00) >> 8;
1102 printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1103 host
->host_no
, pun
, lun
,
1104 i
, NCR_700_phase
[i
]);
1106 save_for_reselection(hostdata
, SCp
, dsp
);
1108 } else if(dsps
== A_RESELECTION_IDENTIFIED
) {
1110 struct NCR_700_command_slot
*slot
;
1111 __u8 reselection_id
= hostdata
->reselection_id
;
1112 struct scsi_device
*SDp
;
1114 lun
= hostdata
->msgin
[0] & 0x1f;
1116 hostdata
->reselection_id
= 0xff;
1117 DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1118 host
->host_no
, reselection_id
, lun
));
1119 /* clear the reselection indicator */
1120 SDp
= __scsi_device_lookup(host
, 0, reselection_id
, lun
);
1121 if(unlikely(SDp
== NULL
)) {
1122 printk(KERN_ERR
"scsi%d: (%d:%d) HAS NO device\n",
1123 host
->host_no
, reselection_id
, lun
);
1126 if(hostdata
->msgin
[1] == A_SIMPLE_TAG_MSG
) {
1127 struct scsi_cmnd
*SCp
= scsi_find_tag(SDp
, hostdata
->msgin
[2]);
1128 if(unlikely(SCp
== NULL
)) {
1129 printk(KERN_ERR
"scsi%d: (%d:%d) no saved request for tag %d\n",
1130 host
->host_no
, reselection_id
, lun
, hostdata
->msgin
[2]);
1134 slot
= (struct NCR_700_command_slot
*)SCp
->host_scribble
;
1135 DDEBUG(KERN_DEBUG
, SDp
,
1136 "reselection is tag %d, slot %p(%d)\n",
1137 hostdata
->msgin
[2], slot
, slot
->tag
);
1139 struct scsi_cmnd
*SCp
= scsi_find_tag(SDp
, SCSI_NO_TAG
);
1140 if(unlikely(SCp
== NULL
)) {
1141 sdev_printk(KERN_ERR
, SDp
,
1142 "no saved request for untagged cmd\n");
1145 slot
= (struct NCR_700_command_slot
*)SCp
->host_scribble
;
1149 printk(KERN_ERR
"scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1150 host
->host_no
, reselection_id
, lun
,
1151 hostdata
->msgin
[0], hostdata
->msgin
[1],
1152 hostdata
->msgin
[2]);
1154 if(hostdata
->state
!= NCR_700_HOST_BUSY
)
1155 printk(KERN_ERR
"scsi%d: FATAL, host not busy during valid reselection!\n",
1157 resume_offset
= slot
->resume_offset
;
1158 hostdata
->cmd
= slot
->cmnd
;
1160 /* re-patch for this command */
1161 script_patch_32_abs(hostdata
->dev
, hostdata
->script
,
1162 CommandAddress
, slot
->pCmd
);
1163 script_patch_16(hostdata
->dev
, hostdata
->script
,
1164 CommandCount
, slot
->cmnd
->cmd_len
);
1165 script_patch_32_abs(hostdata
->dev
, hostdata
->script
,
1166 SGScriptStartAddress
,
1167 to32bit(&slot
->pSG
[0].ins
));
1169 /* Note: setting SXFER only works if we're
1170 * still in the MESSAGE phase, so it is vital
1171 * that ACK is still asserted when we process
1172 * the reselection message. The resume offset
1173 * should therefore always clear ACK */
1174 NCR_700_writeb(NCR_700_get_SXFER(hostdata
->cmd
->device
),
1176 dma_cache_sync(hostdata
->dev
, hostdata
->msgin
,
1177 MSG_ARRAY_SIZE
, DMA_FROM_DEVICE
);
1178 dma_cache_sync(hostdata
->dev
, hostdata
->msgout
,
1179 MSG_ARRAY_SIZE
, DMA_TO_DEVICE
);
1180 /* I'm just being paranoid here, the command should
1181 * already have been flushed from the cache */
1182 dma_cache_sync(hostdata
->dev
, slot
->cmnd
->cmnd
,
1183 slot
->cmnd
->cmd_len
, DMA_TO_DEVICE
);
1188 } else if(dsps
== A_RESELECTED_DURING_SELECTION
) {
1190 /* This section is full of debugging code because I've
1191 * never managed to reach it. I think what happens is
1192 * that, because the 700 runs with selection
1193 * interrupts enabled the whole time that we take a
1194 * selection interrupt before we manage to get to the
1195 * reselected script interrupt */
1197 __u8 reselection_id
= NCR_700_readb(host
, SFBR_REG
);
1198 struct NCR_700_command_slot
*slot
;
1200 /* Take out our own ID */
1201 reselection_id
&= ~(1<<host
->this_id
);
1203 /* I've never seen this happen, so keep this as a printk rather
1205 printk(KERN_INFO
"scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1206 host
->host_no
, reselection_id
, lun
, dsp
, dsp
- hostdata
->pScript
, hostdata
->state
, hostdata
->command_slot_count
);
1209 /* FIXME: DEBUGGING CODE */
1210 __u32 SG
= (__u32
)bS_to_cpu(hostdata
->script
[A_SGScriptStartAddress_used
[0]]);
1213 for(i
=0; i
< NCR_700_COMMAND_SLOTS_PER_HOST
; i
++) {
1214 if(SG
>= to32bit(&hostdata
->slots
[i
].pSG
[0])
1215 && SG
<= to32bit(&hostdata
->slots
[i
].pSG
[NCR_700_SG_SEGMENTS
]))
1218 printk(KERN_INFO
"IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG
, &hostdata
->slots
[i
], hostdata
->slots
[i
].cmnd
, hostdata
->slots
[i
].resume_offset
);
1219 SCp
= hostdata
->slots
[i
].cmnd
;
1223 slot
= (struct NCR_700_command_slot
*)SCp
->host_scribble
;
1224 /* change slot from busy to queued to redo command */
1225 slot
->state
= NCR_700_SLOT_QUEUED
;
1227 hostdata
->cmd
= NULL
;
1229 if(reselection_id
== 0) {
1230 if(hostdata
->reselection_id
== 0xff) {
1231 printk(KERN_ERR
"scsi%d: Invalid reselection during selection!!\n", host
->host_no
);
1234 printk(KERN_ERR
"scsi%d: script reselected and we took a selection interrupt\n",
1236 reselection_id
= hostdata
->reselection_id
;
1240 /* convert to real ID */
1241 reselection_id
= bitmap_to_number(reselection_id
);
1243 hostdata
->reselection_id
= reselection_id
;
1244 /* just in case we have a stale simple tag message, clear it */
1245 hostdata
->msgin
[1] = 0;
1246 dma_cache_sync(hostdata
->dev
, hostdata
->msgin
,
1247 MSG_ARRAY_SIZE
, DMA_BIDIRECTIONAL
);
1248 if(hostdata
->tag_negotiated
& (1<<reselection_id
)) {
1249 resume_offset
= hostdata
->pScript
+ Ent_GetReselectionWithTag
;
1251 resume_offset
= hostdata
->pScript
+ Ent_GetReselectionData
;
1253 } else if(dsps
== A_COMPLETED_SELECTION_AS_TARGET
) {
1254 /* we've just disconnected from the bus, do nothing since
1255 * a return here will re-run the queued command slot
1256 * that may have been interrupted by the initial selection */
1257 DEBUG((" SELECTION COMPLETED\n"));
1258 } else if((dsps
& 0xfffff0f0) == A_MSG_IN
) {
1259 resume_offset
= process_message(host
, hostdata
, SCp
,
1261 } else if((dsps
& 0xfffff000) == 0) {
1262 __u8 i
= (dsps
& 0xf0) >> 4, j
= (dsps
& 0xf00) >> 8;
1263 printk(KERN_ERR
"scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1264 host
->host_no
, pun
, lun
, NCR_700_condition
[i
],
1265 NCR_700_phase
[j
], dsp
- hostdata
->pScript
);
1267 scsi_print_command(SCp
);
1270 for(i
= 0; i
< SCp
->use_sg
+ 1; i
++) {
1271 printk(KERN_INFO
" SG[%d].length = %d, move_insn=%08x, addr %08x\n", i
, ((struct scatterlist
*)SCp
->request_buffer
)[i
].length
, ((struct NCR_700_command_slot
*)SCp
->host_scribble
)->SG
[i
].ins
, ((struct NCR_700_command_slot
*)SCp
->host_scribble
)->SG
[i
].pAddr
);
1275 NCR_700_internal_bus_reset(host
);
1276 } else if((dsps
& 0xfffff000) == A_DEBUG_INTERRUPT
) {
1277 printk(KERN_NOTICE
"scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1278 host
->host_no
, pun
, lun
, dsps
& 0xfff, dsp
, dsp
- hostdata
->pScript
);
1279 resume_offset
= dsp
;
1281 printk(KERN_ERR
"scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1282 host
->host_no
, pun
, lun
, dsps
, dsp
- hostdata
->pScript
);
1283 NCR_700_internal_bus_reset(host
);
1285 return resume_offset
;
1288 /* We run the 53c700 with selection interrupts always enabled. This
1289 * means that the chip may be selected as soon as the bus frees. On a
1290 * busy bus, this can be before the scripts engine finishes its
1291 * processing. Therefore, part of the selection processing has to be
1292 * to find out what the scripts engine is doing and complete the
1293 * function if necessary (i.e. process the pending disconnect or save
1294 * the interrupted initial selection */
1296 process_selection(struct Scsi_Host
*host
, __u32 dsp
)
1298 __u8 id
= 0; /* Squash compiler warning */
1300 __u32 resume_offset
= 0;
1301 struct NCR_700_Host_Parameters
*hostdata
=
1302 (struct NCR_700_Host_Parameters
*)host
->hostdata
[0];
1303 struct scsi_cmnd
*SCp
= hostdata
->cmd
;
1306 for(count
= 0; count
< 5; count
++) {
1307 id
= NCR_700_readb(host
, hostdata
->chip710
?
1308 CTEST9_REG
: SFBR_REG
);
1310 /* Take out our own ID */
1311 id
&= ~(1<<host
->this_id
);
1316 sbcl
= NCR_700_readb(host
, SBCL_REG
);
1317 if((sbcl
& SBCL_IO
) == 0) {
1318 /* mark as having been selected rather than reselected */
1321 /* convert to real ID */
1322 hostdata
->reselection_id
= id
= bitmap_to_number(id
);
1323 DEBUG(("scsi%d: Reselected by %d\n",
1324 host
->host_no
, id
));
1326 if(hostdata
->state
== NCR_700_HOST_BUSY
&& SCp
!= NULL
) {
1327 struct NCR_700_command_slot
*slot
=
1328 (struct NCR_700_command_slot
*)SCp
->host_scribble
;
1329 DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id
, hostdata
->cmd
, slot
, dsp
, dsp
- hostdata
->pScript
, resume_offset
));
1331 switch(dsp
- hostdata
->pScript
) {
1332 case Ent_Disconnect1
:
1333 case Ent_Disconnect2
:
1334 save_for_reselection(hostdata
, SCp
, Ent_Disconnect2
+ hostdata
->pScript
);
1336 case Ent_Disconnect3
:
1337 case Ent_Disconnect4
:
1338 save_for_reselection(hostdata
, SCp
, Ent_Disconnect4
+ hostdata
->pScript
);
1340 case Ent_Disconnect5
:
1341 case Ent_Disconnect6
:
1342 save_for_reselection(hostdata
, SCp
, Ent_Disconnect6
+ hostdata
->pScript
);
1344 case Ent_Disconnect7
:
1345 case Ent_Disconnect8
:
1346 save_for_reselection(hostdata
, SCp
, Ent_Disconnect8
+ hostdata
->pScript
);
1350 process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS
, dsp
, SCp
, host
, hostdata
);
1354 slot
->state
= NCR_700_SLOT_QUEUED
;
1358 hostdata
->state
= NCR_700_HOST_BUSY
;
1359 hostdata
->cmd
= NULL
;
1360 /* clear any stale simple tag message */
1361 hostdata
->msgin
[1] = 0;
1362 dma_cache_sync(hostdata
->dev
, hostdata
->msgin
, MSG_ARRAY_SIZE
,
1366 /* Selected as target, Ignore */
1367 resume_offset
= hostdata
->pScript
+ Ent_SelectedAsTarget
;
1368 } else if(hostdata
->tag_negotiated
& (1<<id
)) {
1369 resume_offset
= hostdata
->pScript
+ Ent_GetReselectionWithTag
;
1371 resume_offset
= hostdata
->pScript
+ Ent_GetReselectionData
;
1373 return resume_offset
;
1377 NCR_700_clear_fifo(struct Scsi_Host
*host
) {
1378 const struct NCR_700_Host_Parameters
*hostdata
1379 = (struct NCR_700_Host_Parameters
*)host
->hostdata
[0];
1380 if(hostdata
->chip710
) {
1381 NCR_700_writeb(CLR_FIFO_710
, host
, CTEST8_REG
);
1383 NCR_700_writeb(CLR_FIFO
, host
, DFIFO_REG
);
1388 NCR_700_flush_fifo(struct Scsi_Host
*host
) {
1389 const struct NCR_700_Host_Parameters
*hostdata
1390 = (struct NCR_700_Host_Parameters
*)host
->hostdata
[0];
1391 if(hostdata
->chip710
) {
1392 NCR_700_writeb(FLUSH_DMA_FIFO_710
, host
, CTEST8_REG
);
1394 NCR_700_writeb(0, host
, CTEST8_REG
);
1396 NCR_700_writeb(FLUSH_DMA_FIFO
, host
, DFIFO_REG
);
1398 NCR_700_writeb(0, host
, DFIFO_REG
);
/* The queue lock with interrupts disabled must be held on entry to
 * this function */
STATIC int
NCR_700_start_command(struct scsi_cmnd *SCp)
{
1408 struct NCR_700_command_slot
*slot
=
1409 (struct NCR_700_command_slot
*)SCp
->host_scribble
;
1410 struct NCR_700_Host_Parameters
*hostdata
=
1411 (struct NCR_700_Host_Parameters
*)SCp
->device
->host
->hostdata
[0];
1412 __u16 count
= 1; /* for IDENTIFY message */
1414 if(hostdata
->state
!= NCR_700_HOST_FREE
) {
1415 /* keep this inside the lock to close the race window where
1416 * the running command finishes on another CPU while we don't
1417 * change the state to queued on this one */
1418 slot
->state
= NCR_700_SLOT_QUEUED
;
1420 DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1421 SCp
->device
->host
->host_no
, slot
->cmnd
, slot
));
1424 hostdata
->state
= NCR_700_HOST_BUSY
;
1425 hostdata
->cmd
= SCp
;
1426 slot
->state
= NCR_700_SLOT_BUSY
;
1427 /* keep interrupts disabled until we have the command correctly
1428 * set up so we cannot take a selection interrupt */
1430 hostdata
->msgout
[0] = NCR_700_identify((SCp
->cmnd
[0] != REQUEST_SENSE
&&
1431 slot
->flags
!= NCR_700_FLAG_AUTOSENSE
),
1433 /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1434 * if the negotiated transfer parameters still hold, so
1435 * always renegotiate them */
1436 if(SCp
->cmnd
[0] == INQUIRY
|| SCp
->cmnd
[0] == REQUEST_SENSE
||
1437 slot
->flags
== NCR_700_FLAG_AUTOSENSE
) {
1438 NCR_700_clear_flag(SCp
->device
, NCR_700_DEV_NEGOTIATED_SYNC
);
1441 /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1442 * If a contingent allegiance condition exists, the device
1443 * will refuse all tags, so send the request sense as untagged
1445 if((hostdata
->tag_negotiated
& (1<<scmd_id(SCp
)))
1446 && (slot
->tag
!= SCSI_NO_TAG
&& SCp
->cmnd
[0] != REQUEST_SENSE
&&
1447 slot
->flags
!= NCR_700_FLAG_AUTOSENSE
)) {
1448 count
+= scsi_populate_tag_msg(SCp
, &hostdata
->msgout
[count
]);
1451 if(hostdata
->fast
&&
1452 NCR_700_is_flag_clear(SCp
->device
, NCR_700_DEV_NEGOTIATED_SYNC
)) {
1453 count
+= spi_populate_sync_msg(&hostdata
->msgout
[count
],
1454 spi_period(SCp
->device
->sdev_target
),
1455 spi_offset(SCp
->device
->sdev_target
));
1456 NCR_700_set_flag(SCp
->device
, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION
);
1459 script_patch_16(hostdata
->dev
, hostdata
->script
, MessageCount
, count
);
1462 script_patch_ID(hostdata
->dev
, hostdata
->script
,
1463 Device_ID
, 1<<scmd_id(SCp
));
1465 script_patch_32_abs(hostdata
->dev
, hostdata
->script
, CommandAddress
,
1467 script_patch_16(hostdata
->dev
, hostdata
->script
, CommandCount
,
1469 /* finally plumb the beginning of the SG list into the script
1471 script_patch_32_abs(hostdata
->dev
, hostdata
->script
,
1472 SGScriptStartAddress
, to32bit(&slot
->pSG
[0].ins
));
1473 NCR_700_clear_fifo(SCp
->device
->host
);
1475 if(slot
->resume_offset
== 0)
1476 slot
->resume_offset
= hostdata
->pScript
;
1477 /* now perform all the writebacks and invalidates */
1478 dma_cache_sync(hostdata
->dev
, hostdata
->msgout
, count
, DMA_TO_DEVICE
);
1479 dma_cache_sync(hostdata
->dev
, hostdata
->msgin
, MSG_ARRAY_SIZE
,
1481 dma_cache_sync(hostdata
->dev
, SCp
->cmnd
, SCp
->cmd_len
, DMA_TO_DEVICE
);
1482 dma_cache_sync(hostdata
->dev
, hostdata
->status
, 1, DMA_FROM_DEVICE
);
1484 /* set the synchronous period/offset */
1485 NCR_700_writeb(NCR_700_get_SXFER(SCp
->device
),
1486 SCp
->device
->host
, SXFER_REG
);
1487 NCR_700_writel(slot
->temp
, SCp
->device
->host
, TEMP_REG
);
1488 NCR_700_writel(slot
->resume_offset
, SCp
->device
->host
, DSP_REG
);
1494 NCR_700_intr(int irq
, void *dev_id
)
1496 struct Scsi_Host
*host
= (struct Scsi_Host
*)dev_id
;
1497 struct NCR_700_Host_Parameters
*hostdata
=
1498 (struct NCR_700_Host_Parameters
*)host
->hostdata
[0];
1500 __u32 resume_offset
= 0;
1501 __u8 pun
= 0xff, lun
= 0xff;
1502 unsigned long flags
;
	/* Use the host lock to serialise access to the 53c700
	 * hardware.  Note: In future, we may need to take the queue
	 * lock to enter the done routines.  When that happens, we
	 * need to ensure that for this driver, the host lock and the
	 * queue lock point to the same thing. */
1510 spin_lock_irqsave(host
->host_lock
, flags
);
1511 if((istat
= NCR_700_readb(host
, ISTAT_REG
))
1512 & (SCSI_INT_PENDING
| DMA_INT_PENDING
)) {
1514 __u8 sstat0
= 0, dstat
= 0;
1516 struct scsi_cmnd
*SCp
= hostdata
->cmd
;
1517 enum NCR_700_Host_State state
;
1520 state
= hostdata
->state
;
1521 SCp
= hostdata
->cmd
;
1523 if(istat
& SCSI_INT_PENDING
) {
1526 sstat0
= NCR_700_readb(host
, SSTAT0_REG
);
1529 if(istat
& DMA_INT_PENDING
) {
1532 dstat
= NCR_700_readb(host
, DSTAT_REG
);
1535 dsps
= NCR_700_readl(host
, DSPS_REG
);
1536 dsp
= NCR_700_readl(host
, DSP_REG
);
1538 DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1539 host
->host_no
, istat
, sstat0
, dstat
,
1540 (dsp
- (__u32
)(hostdata
->pScript
))/4,
1544 pun
= SCp
->device
->id
;
1545 lun
= SCp
->device
->lun
;
1548 if(sstat0
& SCSI_RESET_DETECTED
) {
1549 struct scsi_device
*SDp
;
1552 hostdata
->state
= NCR_700_HOST_BUSY
;
1554 printk(KERN_ERR
"scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1555 host
->host_no
, SCp
, SCp
== NULL
? NULL
: SCp
->host_scribble
, dsp
, dsp
- hostdata
->pScript
);
1557 scsi_report_bus_reset(host
, 0);
1559 /* clear all the negotiated parameters */
1560 __shost_for_each_device(SDp
, host
)
1561 NCR_700_clear_flag(SDp
, ~0);
1563 /* clear all the slots and their pending commands */
1564 for(i
= 0; i
< NCR_700_COMMAND_SLOTS_PER_HOST
; i
++) {
1565 struct scsi_cmnd
*SCp
;
1566 struct NCR_700_command_slot
*slot
=
1567 &hostdata
->slots
[i
];
1569 if(slot
->state
== NCR_700_SLOT_FREE
)
1573 printk(KERN_ERR
" failing command because of reset, slot %p, cmnd %p\n",
1575 free_slot(slot
, hostdata
);
1576 SCp
->host_scribble
= NULL
;
1577 NCR_700_set_depth(SCp
->device
, 0);
				/* NOTE: deadlock potential here: we
				 * rely on mid-layer guarantees that
				 * scsi_done won't try to issue the
				 * command again otherwise we'll
				 * deadlock on the
				 * hostdata->state_lock */
1584 SCp
->result
= DID_RESET
<< 16;
1585 SCp
->scsi_done(SCp
);
1588 NCR_700_chip_setup(host
);
1590 hostdata
->state
= NCR_700_HOST_FREE
;
1591 hostdata
->cmd
= NULL
;
1592 /* signal back if this was an eh induced reset */
1593 if(hostdata
->eh_complete
!= NULL
)
1594 complete(hostdata
->eh_complete
);
1596 } else if(sstat0
& SELECTION_TIMEOUT
) {
1597 DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1598 host
->host_no
, pun
, lun
));
1599 NCR_700_scsi_done(hostdata
, SCp
, DID_NO_CONNECT
<<16);
1600 } else if(sstat0
& PHASE_MISMATCH
) {
1601 struct NCR_700_command_slot
*slot
= (SCp
== NULL
) ? NULL
:
1602 (struct NCR_700_command_slot
*)SCp
->host_scribble
;
1604 if(dsp
== Ent_SendMessage
+ 8 + hostdata
->pScript
) {
1605 /* It wants to reply to some part of
1607 #ifdef NCR_700_DEBUG
1608 __u32 temp
= NCR_700_readl(host
, TEMP_REG
);
1609 int count
= (hostdata
->script
[Ent_SendMessage
/4] & 0xffffff) - ((NCR_700_readl(host
, DBC_REG
) & 0xffffff) + NCR_700_data_residual(host
));
1610 printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host
->host_no
, pun
, lun
, count
, (void *)temp
, temp
- hostdata
->pScript
, sbcl_to_string(NCR_700_readb(host
, SBCL_REG
)));
1612 resume_offset
= hostdata
->pScript
+ Ent_SendMessagePhaseMismatch
;
1613 } else if(dsp
>= to32bit(&slot
->pSG
[0].ins
) &&
1614 dsp
<= to32bit(&slot
->pSG
[NCR_700_SG_SEGMENTS
].ins
)) {
1615 int data_transfer
= NCR_700_readl(host
, DBC_REG
) & 0xffffff;
1616 int SGcount
= (dsp
- to32bit(&slot
->pSG
[0].ins
))/sizeof(struct NCR_700_SG_List
);
1617 int residual
= NCR_700_data_residual(host
);
1619 #ifdef NCR_700_DEBUG
1620 __u32 naddr
= NCR_700_readl(host
, DNAD_REG
);
1622 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1623 host
->host_no
, pun
, lun
,
1624 SGcount
, data_transfer
);
1625 scsi_print_command(SCp
);
1627 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1628 host
->host_no
, pun
, lun
,
1629 SGcount
, data_transfer
, residual
);
1632 data_transfer
+= residual
;
1634 if(data_transfer
!= 0) {
1640 count
= (bS_to_cpu(slot
->SG
[SGcount
].ins
) & 0x00ffffff);
1641 DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count
, count
-data_transfer
));
1642 slot
->SG
[SGcount
].ins
&= bS_to_host(0xff000000);
1643 slot
->SG
[SGcount
].ins
|= bS_to_host(data_transfer
);
1644 pAddr
= bS_to_cpu(slot
->SG
[SGcount
].pAddr
);
1645 pAddr
+= (count
- data_transfer
);
1646 #ifdef NCR_700_DEBUG
1647 if(pAddr
!= naddr
) {
1648 printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host
->host_no
, pun
, lun
, (unsigned long)pAddr
, (unsigned long)naddr
, data_transfer
, residual
);
1651 slot
->SG
[SGcount
].pAddr
= bS_to_host(pAddr
);
1653 /* set the executed moves to nops */
1654 for(i
=0; i
<SGcount
; i
++) {
1655 slot
->SG
[i
].ins
= bS_to_host(SCRIPT_NOP
);
1656 slot
->SG
[i
].pAddr
= 0;
1658 dma_cache_sync(hostdata
->dev
, slot
->SG
, sizeof(slot
->SG
), DMA_TO_DEVICE
);
1659 /* and pretend we disconnected after
1660 * the command phase */
1661 resume_offset
= hostdata
->pScript
+ Ent_MsgInDuringData
;
1662 /* make sure all the data is flushed */
1663 NCR_700_flush_fifo(host
);
1665 __u8 sbcl
= NCR_700_readb(host
, SBCL_REG
);
1666 printk(KERN_ERR
"scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1667 host
->host_no
, pun
, lun
, dsp
- hostdata
->pScript
, sbcl_to_string(sbcl
));
1668 NCR_700_internal_bus_reset(host
);
1671 } else if(sstat0
& SCSI_GROSS_ERROR
) {
1672 printk(KERN_ERR
"scsi%d: (%d:%d) GROSS ERROR\n",
1673 host
->host_no
, pun
, lun
);
1674 NCR_700_scsi_done(hostdata
, SCp
, DID_ERROR
<<16);
1675 } else if(sstat0
& PARITY_ERROR
) {
1676 printk(KERN_ERR
"scsi%d: (%d:%d) PARITY ERROR\n",
1677 host
->host_no
, pun
, lun
);
1678 NCR_700_scsi_done(hostdata
, SCp
, DID_ERROR
<<16);
1679 } else if(dstat
& SCRIPT_INT_RECEIVED
) {
1680 DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1681 host
->host_no
, pun
, lun
));
1682 resume_offset
= process_script_interrupt(dsps
, dsp
, SCp
, host
, hostdata
);
1683 } else if(dstat
& (ILGL_INST_DETECTED
)) {
1684 printk(KERN_ERR
"scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1685 " Please email James.Bottomley@HansenPartnership.com with the details\n",
1686 host
->host_no
, pun
, lun
,
1687 dsp
, dsp
- hostdata
->pScript
);
1688 NCR_700_scsi_done(hostdata
, SCp
, DID_ERROR
<<16);
1689 } else if(dstat
& (WATCH_DOG_INTERRUPT
|ABORTED
)) {
1690 printk(KERN_ERR
"scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1691 host
->host_no
, pun
, lun
, dstat
);
1692 NCR_700_scsi_done(hostdata
, SCp
, DID_ERROR
<<16);
		/* NOTE: selection interrupt processing MUST occur
		 * after script interrupt processing to correctly cope
		 * with the case where we process a disconnect and
		 * then get reselected before we process the
		 * disconnection */
1701 if(sstat0
& SELECTED
) {
1702 /* FIXME: It currently takes at least FOUR
1703 * interrupts to complete a command that
1704 * disconnects: one for the disconnect, one
1705 * for the reselection, one to get the
1706 * reselection data and one to complete the
1707 * command. If we guess the reselected
1708 * command here and prepare it, we only need
1709 * to get a reselection data interrupt if we
1710 * guessed wrongly. Since the interrupt
1711 * overhead is much greater than the command
1712 * setup, this would be an efficient
1713 * optimisation particularly as we probably
1714 * only have one outstanding command on a
1715 * target most of the time */
1717 resume_offset
= process_selection(host
, dsp
);
1724 if(hostdata
->state
!= NCR_700_HOST_BUSY
) {
1725 printk(KERN_ERR
"scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1726 host
->host_no
, resume_offset
, resume_offset
- hostdata
->pScript
);
1727 hostdata
->state
= NCR_700_HOST_BUSY
;
1730 DEBUG(("Attempting to resume at %x\n", resume_offset
));
1731 NCR_700_clear_fifo(host
);
1732 NCR_700_writel(resume_offset
, host
, DSP_REG
);
	/* There is probably a technical no-no about this: If we're a
	 * shared interrupt and we got this interrupt because the
	 * other device needs servicing not us, we're still going to
	 * check our queued commands here---of course, there shouldn't
	 * be any outstanding.... */
	if(hostdata->state == NCR_700_HOST_FREE) {
		int i;

		for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
			/* fairness: always run the queue from the last
			 * position we left off */
			int j = (i + hostdata->saved_slot_position)
				% NCR_700_COMMAND_SLOTS_PER_HOST;

			if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
				continue;
			if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
				DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
				       host->host_no, &hostdata->slots[j],
				       hostdata->slots[j].cmnd));
				hostdata->saved_slot_position = j + 1;
			}

			break;
		}
	}

	spin_unlock_irqrestore(host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
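
/* queuecommand: grab a free command slot, sort out the tag queueing
 * state for the device, build the scatter/gather move list for the
 * script engine and then hand the command off with
 * NCR_700_start_command() */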
STATIC int
NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
{
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
	__u32 move_ins;
	enum dma_data_direction direction;
	struct NCR_700_command_slot *slot;

	if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
		/* We're over our allocation, this should never happen
		 * since we report the max allocation to the mid layer */
		printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
		return 1;
	}
	/* check for untagged commands.  We cannot have any outstanding
	 * commands if we accept them.  Commands could be untagged because:
	 *
	 * - The tag negotiated bitmap is clear
	 * - The blk layer sent an untagged command
	 */
	if(NCR_700_get_depth(SCp->device) != 0
	   && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
	       || !blk_rq_tagged(SCp->request))) {
		CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
		       NCR_700_get_depth(SCp->device));
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}
	if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
		CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
		       NCR_700_get_depth(SCp->device));
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}
	NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);

	/* begin the command here */
	/* no need to check for NULL, test for command_slot_count above
	 * ensures a slot is free */
	slot = find_empty_slot(hostdata);

	slot->cmnd = SCp;

	SCp->scsi_done = done;
	SCp->host_scribble = (unsigned char *)slot;
	SCp->SCp.ptr = NULL;
	SCp->SCp.buffer = NULL;

#ifdef NCR_700_DEBUG
	printk("53c700: scsi%d, command ", SCp->device->host->host_no);
	scsi_print_command(SCp);
#endif
	if(blk_rq_tagged(SCp->request)
	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
	   && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
		scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
		NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
	}

	/* here we may have to process an untagged command.  The gate
	 * above ensures that this will be the only one outstanding,
	 * so clear the tag negotiated bit.
	 *
	 * FIXME: This will royally screw up on multiple LUN devices
	 */
	if(!blk_rq_tagged(SCp->request)
	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
		scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
	}

	if((hostdata->tag_negotiated &(1<<scmd_id(SCp)))
	   && scsi_get_tag_type(SCp->device)) {
		slot->tag = SCp->request->tag;
		CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
		       slot->tag, slot);
	} else {
		slot->tag = SCSI_NO_TAG;
		/* must populate current_cmnd for scsi_find_tag to work */
		SCp->device->current_cmnd = SCp;
	}
	/* sanity check: some of the commands generated by the mid-layer
	 * have an eccentric idea of their sc_data_direction */
	if(!SCp->use_sg && !SCp->request_bufflen
	   && SCp->sc_data_direction != DMA_NONE) {
#ifdef NCR_700_DEBUG
		printk("53c700: Command");
		scsi_print_command(SCp);
		printk("Has wrong data direction %d\n", SCp->sc_data_direction);
#endif
		SCp->sc_data_direction = DMA_NONE;
	}

	switch (SCp->cmnd[0]) {
	case REQUEST_SENSE:
		/* clear the internal sense magic */
		SCp->cmnd[6] = 0;
		/* fall through */
	default:
		/* OK, get it from the command */
		switch(SCp->sc_data_direction) {
		case DMA_BIDIRECTIONAL:
		default:
			printk(KERN_ERR "53c700: Unknown command for data direction ");
			scsi_print_command(SCp);

			move_ins = 0;
			break;
		case DMA_NONE:
			move_ins = 0;
			break;
		case DMA_FROM_DEVICE:
			move_ins = SCRIPT_MOVE_DATA_IN;
			break;
		case DMA_TO_DEVICE:
			move_ins = SCRIPT_MOVE_DATA_OUT;
			break;
		}
	}

	/* now build the scatter gather list */
	direction = SCp->sc_data_direction;
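
	/* Each slot->SG[] entry is a script MOVE instruction (the
	 * opcode or'd with the byte count) followed by the bus
	 * address of the data segment; the list is terminated with a
	 * SCRIPT_RETURN so the data transfer script jumps back once
	 * the final segment has been moved. */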
	if(move_ins != 0) {
		int i;
		int sg_count;
		dma_addr_t vPtr = 0;
		__u32 count = 0;

		if(SCp->use_sg) {
			sg_count = dma_map_sg(hostdata->dev,
					      SCp->request_buffer, SCp->use_sg,
					      direction);
		} else {
			vPtr = dma_map_single(hostdata->dev,
					      SCp->request_buffer,
					      SCp->request_bufflen,
					      direction);
			count = SCp->request_bufflen;
			slot->dma_handle = vPtr;
			sg_count = 1;
		}

		for(i = 0; i < sg_count; i++) {

			if(SCp->use_sg) {
				struct scatterlist *sg = SCp->request_buffer;

				vPtr = sg_dma_address(&sg[i]);
				count = sg_dma_len(&sg[i]);
			}

			slot->SG[i].ins = bS_to_host(move_ins | count);
			DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
			       i, count, slot->SG[i].ins, (unsigned long)vPtr));
			slot->SG[i].pAddr = bS_to_host(vPtr);
		}
		slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
		slot->SG[i].pAddr = 0;
		dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
		DEBUG((" SETTING %08lx to %x\n",
		       (&slot->pSG[i].ins),
		       slot->SG[i].ins));
	}
	slot->resume_offset = 0;
	slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
				    sizeof(SCp->cmnd), DMA_TO_DEVICE);
	NCR_700_start_command(SCp);
	return 0;
}

STATIC int
NCR_700_abort(struct scsi_cmnd * SCp)
{
	struct NCR_700_command_slot *slot;

	scmd_printk(KERN_INFO, SCp,
		"New error handler wants to abort command\n\t");
	scsi_print_command(SCp);

	slot = (struct NCR_700_command_slot *)SCp->host_scribble;

	if(slot == NULL)
		/* no outstanding command to abort */
		return SUCCESS;
	if(SCp->cmnd[0] == TEST_UNIT_READY) {
		/* FIXME: This is because of a problem in the new
		 * error handler.  When it is in error recovery, it
		 * will send a TUR to a device it thinks may still be
		 * showing a problem.  If the TUR isn't responded to,
		 * it will abort it and mark the device off line.
		 * Unfortunately, it does no other error recovery, so
		 * this would leave us with an outstanding command
		 * occupying a slot.  Rather than allow this to
		 * happen, we issue a bus reset to force all
		 * outstanding commands to terminate here. */
		NCR_700_internal_bus_reset(SCp->device->host);
		/* still drop through and return failed */
	}
	return FAILED;
}

STATIC int
NCR_700_bus_reset(struct scsi_cmnd * SCp)
{
	DECLARE_COMPLETION_ONSTACK(complete);
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];

	scmd_printk(KERN_INFO, SCp,
		"New error handler wants BUS reset, cmd %p\n\t", SCp);
	scsi_print_command(SCp);

	/* In theory, eh_complete should always be null because the
	 * eh is single threaded, but just in case we're handling a
	 * reset via sg or something */
	spin_lock_irq(SCp->device->host->host_lock);
	while (hostdata->eh_complete != NULL) {
		spin_unlock_irq(SCp->device->host->host_lock);
		msleep_interruptible(100);
		spin_lock_irq(SCp->device->host->host_lock);
	}

	hostdata->eh_complete = &complete;
	NCR_700_internal_bus_reset(SCp->device->host);

	spin_unlock_irq(SCp->device->host->host_lock);
	wait_for_completion(&complete);
	spin_lock_irq(SCp->device->host->host_lock);

	hostdata->eh_complete = NULL;
	/* Revalidate the transport parameters of the failing device */
	if(hostdata->fast)
		spi_schedule_dv_device(SCp->device);

	spin_unlock_irq(SCp->device->host->host_lock);
	return SUCCESS;
}

STATIC int
NCR_700_host_reset(struct scsi_cmnd * SCp)
{
	scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
	scsi_print_command(SCp);

	spin_lock_irq(SCp->device->host->host_lock);

	NCR_700_internal_bus_reset(SCp->device->host);
	NCR_700_chip_reset(SCp->device->host);

	spin_unlock_irq(SCp->device->host->host_lock);

	return SUCCESS;
}

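/* spi transport class callback: set a new transfer period.  The
 * value is clamped to the fastest period the chip clock supports and
 * the negotiation flags are cleared so the next command to the
 * target renegotiates synchronous transfer. */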
STATIC void
NCR_700_set_period(struct scsi_target *STp, int period)
{
	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];

	if(!hostdata->fast)
		return;

	if(period < hostdata->min_period)
		period = hostdata->min_period;

	spi_period(STp) = period;
	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
}

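/* spi transport class callback: set a new synchronous offset,
 * clamped to the chip maximum (the 710 allows a deeper offset than
 * the 700), again forcing renegotiation on the next command. */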
STATIC void
NCR_700_set_offset(struct scsi_target *STp, int offset)
{
	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
	int max_offset = hostdata->chip710
		? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;

	if(!hostdata->fast)
		return;

	if(offset > max_offset)
		offset = max_offset;

	/* if we're currently async, make sure the period is reasonable */
	if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
				    spi_period(STp) > 0xff))
		spi_period(STp) = hostdata->min_period;

	spi_offset(STp) = offset;
	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
}

STATIC int
NCR_700_slave_alloc(struct scsi_device *SDp)
{
	SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
				GFP_KERNEL);

	if (!SDp->hostdata)
		return -ENOMEM;

	return 0;
}

STATIC int
NCR_700_slave_configure(struct scsi_device *SDp)
{
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];

	/* to do here: allocate memory; build a queue_full list */
	if(SDp->tagged_supported) {
		scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
		scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
		NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
	} else {
		/* initialise to default depth */
		scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
	}
	if(hostdata->fast) {
		/* Find the correct offset and period via domain validation */
		if (!spi_initial_dv(SDp->sdev_target))
			spi_dv_device(SDp);
	} else {
		spi_offset(SDp->sdev_target) = 0;
		spi_period(SDp->sdev_target) = 0;
	}

	return 0;
}

STATIC void
NCR_700_slave_destroy(struct scsi_device *SDp)
{
	kfree(SDp->hostdata);
	SDp->hostdata = NULL;
}

static int
NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
{
	if (depth > NCR_700_MAX_TAGS)
		depth = NCR_700_MAX_TAGS;

	scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
	return depth;
}

static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
{
	int change_tag = ((tag_type == 0 && scsi_get_tag_type(SDp) != 0)
			  || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];

	scsi_set_tag_type(SDp, tag_type);

	/* We have a global (per target) flag to track whether TCQ is
	 * enabled, so we'll be turning it off for the entire target here.
	 * our tag algorithm will fail if we mix tagged and untagged commands,
	 * so quiesce the device before doing this */
	if (change_tag)
		scsi_target_quiesce(SDp->sdev_target);

	if (!tag_type) {
		/* shift back to the default unqueued number of commands
		 * (the user can still raise this) */
		scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
		hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
	} else {
		/* Here, we cleared the negotiation flag above, so this
		 * will force the driver to renegotiate */
		scsi_activate_tcq(SDp, SDp->queue_depth);
		if (change_tag)
			NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
	}
	if (change_tag)
		scsi_target_resume(SDp->sdev_target);

	return tag_type;
}

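/* sysfs "active_tags" attribute: report how many commands the driver
 * currently has outstanding on the device */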
static ssize_t
NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *SDp = to_scsi_device(dev);

	return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
}

static struct device_attribute NCR_700_active_tags_attr = {
	.attr = {
		.name =		"active_tags",
		.mode =		S_IRUGO,
	},
	.show = NCR_700_show_active_tags,
};

STATIC struct device_attribute *NCR_700_dev_attrs[] = {
	&NCR_700_active_tags_attr,
	NULL,
};

EXPORT_SYMBOL(NCR_700_detect);
EXPORT_SYMBOL(NCR_700_release);
EXPORT_SYMBOL(NCR_700_intr);

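/* spi transport template: attached at module init and released at
 * module exit.  Only the period and offset setters are provided;
 * everything else is left to the transport class defaults. */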
static struct spi_function_template NCR_700_transport_functions = {
	.set_period	= NCR_700_set_period,
	.show_period	= 1,
	.set_offset	= NCR_700_set_offset,
	.show_offset	= 1,
};

static int __init NCR_700_init(void)
{
	NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
	if(!NCR_700_transport_template)
		return -ENODEV;
	return 0;
}

static void __exit NCR_700_exit(void)
{
	spi_release_transport(NCR_700_transport_template);
}

module_init(NCR_700_init);
module_exit(NCR_700_exit);