Sync CAM with FreeBSD using lockmgr locks instead of mutexes.
sys/dev/disk/sym/sym_hipd.c
1 /*
2 * Device driver optimized for the Symbios/LSI 53C896/53C895A/53C1010
3 * PCI-SCSI controllers.
5 * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
7 * This driver also supports the following Symbios/LSI PCI-SCSI chips:
8 * 53C810A, 53C825A, 53C860, 53C875, 53C876, 53C885, 53C895,
9 * 53C810, 53C815, 53C825 and the 53C1510D in 53C8XX mode.
12 * This driver for FreeBSD-CAM is derived from the Linux sym53c8xx driver.
13 * Copyright (C) 1998-1999 Gerard Roudier
15 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
16 * a port of the FreeBSD ncr driver to Linux-1.2.13.
18 * The original ncr driver has been written for 386bsd and FreeBSD by
19 * Wolfgang Stanglmeier <wolf@cologne.de>
20 * Stefan Esser <se@mi.Uni-Koeln.de>
21 * Copyright (C) 1994 Wolfgang Stanglmeier
23 * The initialisation code, and part of the code that addresses
24 * FreeBSD-CAM services is based on the aic7xxx driver for FreeBSD-CAM
25 * written by Justin T. Gibbs.
27 * Other major contributions:
29 * NVRAM detection and reading.
30 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
32 *-----------------------------------------------------------------------------
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. The name of the author may not be used to endorse or promote products
43 * derived from this software without specific prior written permission.
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
49 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * SUCH DAMAGE.
58 /* $FreeBSD: src/sys/dev/sym/sym_hipd.c,v 1.6.2.12 2001/12/02 19:01:10 groudier Exp $ */
59 /* $DragonFly: src/sys/dev/disk/sym/sym_hipd.c,v 1.24 2008/05/18 20:30:22 pavalos Exp $ */
61 #define SYM_DRIVER_NAME "sym-1.6.5-20000902"
63 /* #define SYM_DEBUG_GENERIC_SUPPORT */
65 #include "use_pci.h"
66 #include <sys/param.h>
69 * Only use the BUS stuff for PCI under FreeBSD 4 and later versions.
70 * Note that the old BUS stuff also works for FreeBSD 4 and spares
71 * about 1 KB for the driver object file.
73 #if defined(__DragonFly__) || __FreeBSD_version >= 400000
74 #define FreeBSD_Bus_Dma_Abstraction
75 #define FreeBSD_Bus_Io_Abstraction
76 #define FreeBSD_Bus_Space_Abstraction
77 #endif
80 * Driver configuration options.
82 #include "opt_sym.h"
83 #include "sym_conf.h"
85 #ifndef FreeBSD_Bus_Io_Abstraction
86 #include "use_ncr.h" /* To know if the ncr has been configured */
87 #endif
89 #include <sys/systm.h>
90 #include <sys/malloc.h>
91 #include <sys/kernel.h>
92 #ifdef FreeBSD_Bus_Io_Abstraction
93 #include <sys/module.h>
94 #include <sys/bus.h>
95 #include <sys/rman.h>
96 #endif
97 #include <sys/thread2.h>
99 #include <sys/proc.h>
101 #include <bus/pci/pcireg.h>
102 #include <bus/pci/pcivar.h>
104 #include <machine/clock.h>
106 #include <bus/cam/cam.h>
107 #include <bus/cam/cam_ccb.h>
108 #include <bus/cam/cam_sim.h>
109 #include <bus/cam/cam_xpt_sim.h>
110 #include <bus/cam/cam_debug.h>
112 #include <bus/cam/scsi/scsi_all.h>
113 #include <bus/cam/scsi/scsi_message.h>
115 #include <vm/vm.h>
116 #include <vm/vm_param.h>
117 #include <vm/pmap.h>
119 /* Short and quite clear integer types */
120 typedef int8_t s8;
121 typedef int16_t s16;
122 typedef int32_t s32;
123 typedef u_int8_t u8;
124 typedef u_int16_t u16;
125 typedef u_int32_t u32;
128 * Driver definitions.
130 #include "sym_defs.h"
131 #include "sym_fw.h"
134 * IA32 architecture does not reorder STORES and prevents
135 * LOADS from passing STORES. It is called `program order'
136 * by Intel and allows device drivers to deal with memory
137 * ordering by only ensuring that the code is not reordered
138 * by the compiler when ordering is required.
139 * Other architectures implement a weaker ordering that
140 * requires memory barriers (and also IO barriers when they
141 * make sense) to be used.
144 #if defined __i386__ || defined __amd64__
145 #define MEMORY_BARRIER() do { ; } while(0)
146 #elif defined __powerpc__
147 #define MEMORY_BARRIER() __asm__ volatile("eieio; sync" : : : "memory")
148 #elif defined __ia64__
149 #define MEMORY_BARRIER() __asm__ volatile("mf.a; mf" : : : "memory")
150 #elif defined __sparc64__
151 #define MEMORY_BARRIER() __asm__ volatile("membar #Sync" : : : "memory")
152 #else
153 #error "Unsupported platform"
154 #endif
157 * Portable but naively implemented byte order primitives.
158 * We define the primitives we need, since FreeBSD doesn't
159 * seem to have them yet.
161 #if BYTE_ORDER == BIG_ENDIAN
163 #define __revb16(x) ( (((u16)(x) & (u16)0x00ffU) << 8) | \
164 (((u16)(x) & (u16)0xff00U) >> 8) )
165 #define __revb32(x) ( (((u32)(x) & 0x000000ffU) << 24) | \
166 (((u32)(x) & 0x0000ff00U) << 8) | \
167 (((u32)(x) & 0x00ff0000U) >> 8) | \
168 (((u32)(x) & 0xff000000U) >> 24) )
170 #define __htole16(v) __revb16(v)
171 #define __htole32(v) __revb32(v)
172 #define __le16toh(v) __htole16(v)
173 #define __le32toh(v) __htole32(v)
175 static __inline u16 _htole16(u16 v) { return __htole16(v); }
176 static __inline u32 _htole32(u32 v) { return __htole32(v); }
177 #define _le16toh _htole16
178 #define _le32toh _htole32
180 #else /* LITTLE ENDIAN */
182 #define __htole16(v) (v)
183 #define __htole32(v) (v)
184 #define __le16toh(v) (v)
185 #define __le32toh(v) (v)
187 #define _htole16(v) (v)
188 #define _htole32(v) (v)
189 #define _le16toh(v) (v)
190 #define _le32toh(v) (v)
192 #endif /* BYTE_ORDER */
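/*
 *  For example, on a big endian CPU __htole16(0x1234) evaluates
 *  to 0x3412 and __htole32(0x12345678) to 0x78563412, so values
 *  handed to the chip always end up little endian, whatever the
 *  host byte order.
 */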
195 * A la VMS/CAM-3 queue management.
198 typedef struct sym_quehead {
199 struct sym_quehead *flink; /* Forward pointer */
200 struct sym_quehead *blink; /* Backward pointer */
201 } SYM_QUEHEAD;
203 #define sym_que_init(ptr) do { \
204 (ptr)->flink = (ptr); (ptr)->blink = (ptr); \
205 } while (0)
207 static __inline struct sym_quehead *sym_que_first(struct sym_quehead *head)
209 return (head->flink == head) ? 0 : head->flink;
212 static __inline struct sym_quehead *sym_que_last(struct sym_quehead *head)
214 return (head->blink == head) ? 0 : head->blink;
217 static __inline void __sym_que_add(struct sym_quehead * new,
218 struct sym_quehead * blink,
219 struct sym_quehead * flink)
221 flink->blink = new;
222 new->flink = flink;
223 new->blink = blink;
224 blink->flink = new;
227 static __inline void __sym_que_del(struct sym_quehead * blink,
228 struct sym_quehead * flink)
230 flink->blink = blink;
231 blink->flink = flink;
234 static __inline int sym_que_empty(struct sym_quehead *head)
236 return head->flink == head;
239 static __inline void sym_que_splice(struct sym_quehead *list,
240 struct sym_quehead *head)
242 struct sym_quehead *first = list->flink;
244 if (first != list) {
245 struct sym_quehead *last = list->blink;
246 struct sym_quehead *at = head->flink;
248 first->blink = head;
249 head->flink = first;
251 last->flink = at;
252 at->blink = last;
256 #define sym_que_entry(ptr, type, member) \
257 ((type *)((char *)(ptr)-(unsigned int)(&((type *)0)->member)))
260 #define sym_insque(new, pos) __sym_que_add(new, pos, (pos)->flink)
262 #define sym_remque(el) __sym_que_del((el)->blink, (el)->flink)
264 #define sym_insque_head(new, head) __sym_que_add(new, head, (head)->flink)
266 static __inline struct sym_quehead *sym_remque_head(struct sym_quehead *head)
268 struct sym_quehead *elem = head->flink;
270 if (elem != head)
271 __sym_que_del(head, elem->flink);
272 else
273 elem = 0;
274 return elem;
277 #define sym_insque_tail(new, head) __sym_que_add(new, (head)->blink, head)
279 static __inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head)
281 struct sym_quehead *elem = head->blink;
283 if (elem != head)
284 __sym_que_del(elem->blink, head);
285 else
286 elem = 0;
287 return elem;
291 * This one may be useful.
293 #define FOR_EACH_QUEUED_ELEMENT(head, qp) \
294 for (qp = (head)->flink; qp != (head); qp = qp->flink)
296 * FreeBSD does not offer our kind of queue in the CAM CCB.
297 * So, we have to cast.
299 #define sym_qptr(p) ((struct sym_quehead *) (p))
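/*
 *  A typical (illustrative) way of walking such a queue and
 *  getting back to the containing structure:
 *
 *	SYM_QUEHEAD *qp;
 *
 *	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
 *		struct sym_ccb *cp =
 *		    sym_que_entry(qp, struct sym_ccb, link_ccbq);
 *		...
 *	}
 *
 *  sym_que_entry() recovers the owning structure from its embedded
 *  queue head, much like the usual 'container_of' idiom.
 */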
302 * Simple bitmap operations.
304 #define sym_set_bit(p, n) (((u32 *)(p))[(n)>>5] |= (1<<((n)&0x1f)))
305 #define sym_clr_bit(p, n) (((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f)))
306 #define sym_is_bit(p, n) (((u32 *)(p))[(n)>>5] & (1<<((n)&0x1f)))
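/*
 *  These operate on arrays of u32 used as bitmaps. For example,
 *  sym_set_bit(tp->lun_map, lun) records that a LUN completed an
 *  IO and sym_is_bit(tp->lun_map, lun) tests that fact later.
 */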
309 * Number of tasks per device we want to handle.
311 #if SYM_CONF_MAX_TAG_ORDER > 8
312 #error "more than 256 tags per logical unit not allowed."
313 #endif
314 #define SYM_CONF_MAX_TASK (1<<SYM_CONF_MAX_TAG_ORDER)
317 * Do not use more tasks than we can handle.
319 #ifndef SYM_CONF_MAX_TAG
320 #define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK
321 #endif
322 #if SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
323 #undef SYM_CONF_MAX_TAG
324 #define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK
325 #endif
328 * This one means 'NO TAG for this job'
330 #define NO_TAG (256)
333 * Number of SCSI targets.
335 #if SYM_CONF_MAX_TARGET > 16
336 #error "more than 16 targets not allowed."
337 #endif
340 * Number of logical units per target.
342 #if SYM_CONF_MAX_LUN > 64
343 #error "more than 64 logical units per target not allowed."
344 #endif
347 * Asynchronous pre-scaler (ns). Shall be 40 for
348 * the SCSI timings to be compliant.
350 #define SYM_CONF_MIN_ASYNC (40)
353 * Number of entries in the START and DONE queues.
355 * We limit them to 1 PAGE so that their allocation succeeds.
356 * Each entry is 8 bytes long (2 DWORDS).
358 #ifdef SYM_CONF_MAX_START
359 #define SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2)
360 #else
361 #define SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2)
362 #define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
363 #endif
365 #if SYM_CONF_MAX_QUEUE > PAGE_SIZE/8
366 #undef SYM_CONF_MAX_QUEUE
367 #define SYM_CONF_MAX_QUEUE PAGE_SIZE/8
368 #undef SYM_CONF_MAX_START
369 #define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
370 #endif
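/*
 *  With 4 KB pages, for instance, the queues get clamped to
 *  PAGE_SIZE/8 = 512 entries of 8 bytes each (exactly one page),
 *  leaving SYM_CONF_MAX_START = 510 startable commands.
 */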
373 * For this one, we want a short name :-)
375 #define MAX_QUEUE SYM_CONF_MAX_QUEUE
378 * Active debugging tags and verbosity.
380 #define DEBUG_ALLOC (0x0001)
381 #define DEBUG_PHASE (0x0002)
382 #define DEBUG_POLL (0x0004)
383 #define DEBUG_QUEUE (0x0008)
384 #define DEBUG_RESULT (0x0010)
385 #define DEBUG_SCATTER (0x0020)
386 #define DEBUG_SCRIPT (0x0040)
387 #define DEBUG_TINY (0x0080)
388 #define DEBUG_TIMING (0x0100)
389 #define DEBUG_NEGO (0x0200)
390 #define DEBUG_TAGS (0x0400)
391 #define DEBUG_POINTER (0x0800)
393 #if 0
394 static int sym_debug = 0;
395 #define DEBUG_FLAGS sym_debug
396 #else
397 /* #define DEBUG_FLAGS (0x0631) */
398 #define DEBUG_FLAGS (0x0000)
400 #endif
401 #define sym_verbose (np->verbose)
404 * Insert a delay in micro-seconds and milli-seconds.
406 static void UDELAY(int us) { DELAY(us); }
407 static void MDELAY(int ms) { while (ms--) UDELAY(1000); }
410 * Simple power of two buddy-like allocator.
412 * This simple code is not intended to be fast, but to
413 * provide power of 2 aligned memory allocations.
414 * Since the SCRIPTS processor only supports 8 bit arithmetic,
415 * this allocator allows simple and fast address calculations
416 * from the SCRIPTS code. In addition, cache line alignment
417 * is guaranteed for power of 2 cache line size.
419 * This allocator has been developed for the Linux sym53c8xx
420 * driver, since this O/S does not provide naturally aligned
421 * allocations.
422 * It has the advantage of allowing the driver to use private
423 * pages of memory that will be useful if we ever need to deal
424 * with IO MMUs for PCI.
427 #define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */
428 #define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */
429 #if 0
430 #define MEMO_FREE_UNUSED /* Free unused pages immediately */
431 #endif
432 #define MEMO_WARN 1
433 #define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER)
434 #define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT)
435 #define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1)
437 #define get_pages() kmalloc(MEMO_CLUSTER_SIZE, M_DEVBUF, M_INTWAIT)
438 #define free_pages(p) kfree((p), M_DEVBUF)
440 typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */
442 typedef struct m_link { /* Link between free memory chunks */
443 struct m_link *next;
444 } m_link_s;
446 #ifdef FreeBSD_Bus_Dma_Abstraction
447 typedef struct m_vtob { /* Virtual to Bus address translation */
448 struct m_vtob *next;
449 bus_dmamap_t dmamap; /* Map for this chunk */
450 m_addr_t vaddr; /* Virtual address */
451 m_addr_t baddr; /* Bus physical address */
452 } m_vtob_s;
453 /* Hash this stuff a bit to speed up translations */
454 #define VTOB_HASH_SHIFT 5
455 #define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT)
456 #define VTOB_HASH_MASK (VTOB_HASH_SIZE-1)
457 #define VTOB_HASH_CODE(m) \
458 ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)
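/*
 *  Every address inside a given cluster hashes to the same bucket,
 *  so __vtobus() below only hashes the cluster base address and
 *  walks a short collision chain to find the translation.
 */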
459 #endif
461 typedef struct m_pool { /* Memory pool of a given kind */
462 #ifdef FreeBSD_Bus_Dma_Abstraction
463 bus_dma_tag_t dev_dmat; /* Identifies the pool */
464 bus_dma_tag_t dmat; /* Tag for our fixed allocations */
465 m_addr_t (*getp)(struct m_pool *);
466 #ifdef MEMO_FREE_UNUSED
467 void (*freep)(struct m_pool *, m_addr_t);
468 #endif
469 #define M_GETP() mp->getp(mp)
470 #define M_FREEP(p) mp->freep(mp, p)
471 int nump;
472 m_vtob_s *(vtob[VTOB_HASH_SIZE]);
473 struct m_pool *next;
474 #else
475 #define M_GETP() get_pages()
476 #define M_FREEP(p) free_pages(p)
477 #endif /* FreeBSD_Bus_Dma_Abstraction */
478 struct m_link h[MEMO_CLUSTER_SHIFT - MEMO_SHIFT + 1];
479 } m_pool_s;
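/*
 *  ___sym_malloc() below implements the buddy scheme: free list
 *  h[i] links chunks of size (1 << (MEMO_SHIFT + i)). A request
 *  is rounded up to the next power of two and, when no chunk of
 *  that size is available, a larger chunk is split in halves
 *  (buddies) until the wanted size is reached.
 */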
481 static void *___sym_malloc(m_pool_s *mp, int size)
483 int i = 0;
484 int s = (1 << MEMO_SHIFT);
485 int j;
486 m_addr_t a;
487 m_link_s *h = mp->h;
489 if (size > MEMO_CLUSTER_SIZE)
490 return 0;
492 while (size > s) {
493 s <<= 1;
494 ++i;
497 j = i;
498 while (!h[j].next) {
499 if (s == MEMO_CLUSTER_SIZE) {
500 h[j].next = (m_link_s *) M_GETP();
501 if (h[j].next)
502 h[j].next->next = 0;
503 break;
505 ++j;
506 s <<= 1;
508 a = (m_addr_t) h[j].next;
509 if (a) {
510 h[j].next = h[j].next->next;
511 while (j > i) {
512 j -= 1;
513 s >>= 1;
514 h[j].next = (m_link_s *) (a+s);
515 h[j].next->next = 0;
518 #ifdef DEBUG
519 kprintf("___sym_malloc(%d) = %p\n", size, (void *) a);
520 #endif
521 return (void *) a;
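/*
 *  ___sym_mfree() below is the matching release path. The buddy
 *  of a chunk of size 's' at address 'a' is simply (a ^ s), so a
 *  freed chunk is merged with its buddy whenever that buddy is
 *  itself free, possibly all the way up to a whole cluster.
 */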
524 static void ___sym_mfree(m_pool_s *mp, void *ptr, int size)
526 int i = 0;
527 int s = (1 << MEMO_SHIFT);
528 m_link_s *q;
529 m_addr_t a, b;
530 m_link_s *h = mp->h;
532 #ifdef DEBUG
533 kprintf("___sym_mfree(%p, %d)\n", ptr, size);
534 #endif
536 if (size > MEMO_CLUSTER_SIZE)
537 return;
539 while (size > s) {
540 s <<= 1;
541 ++i;
544 a = (m_addr_t) ptr;
546 while (1) {
547 #ifdef MEMO_FREE_UNUSED
548 if (s == MEMO_CLUSTER_SIZE) {
549 M_FREEP(a);
550 break;
552 #endif
553 b = a ^ s;
554 q = &h[i];
555 while (q->next && q->next != (m_link_s *) b) {
556 q = q->next;
558 if (!q->next) {
559 ((m_link_s *) a)->next = h[i].next;
560 h[i].next = (m_link_s *) a;
561 break;
563 q->next = q->next->next;
564 a = a & b;
565 s <<= 1;
566 ++i;
570 static void *__sym_calloc2(m_pool_s *mp, int size, char *name, int uflags)
572 void *p;
574 p = ___sym_malloc(mp, size);
576 if (DEBUG_FLAGS & DEBUG_ALLOC)
577 kprintf ("new %-10s[%4d] @%p.\n", name, size, p);
579 if (p)
580 bzero(p, size);
581 else if (uflags & MEMO_WARN)
582 kprintf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size);
584 return p;
587 #define __sym_calloc(mp, s, n) __sym_calloc2(mp, s, n, MEMO_WARN)
589 static void __sym_mfree(m_pool_s *mp, void *ptr, int size, char *name)
591 if (DEBUG_FLAGS & DEBUG_ALLOC)
592 kprintf ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
594 ___sym_mfree(mp, ptr, size);
599 * Default memory pool we do not need to involve in DMA.
601 #ifndef FreeBSD_Bus_Dma_Abstraction
603 * Without the `bus dma abstraction', all the memory is assumed
604 * DMAable and a single pool is all we need.
606 static m_pool_s mp0;
608 #else
610 * With the `bus dma abstraction', we use a separate pool for
611 * memory we do not need to involve in DMA.
613 static m_addr_t ___mp0_getp(m_pool_s *mp)
615 m_addr_t m = (m_addr_t) get_pages();
616 if (m)
617 ++mp->nump;
618 return m;
621 #ifdef MEMO_FREE_UNUSED
622 static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
624 free_pages(m);
625 --mp->nump;
627 #endif
629 #ifdef MEMO_FREE_UNUSED
630 static m_pool_s mp0 = {0, 0, ___mp0_getp, ___mp0_freep};
631 #else
632 static m_pool_s mp0 = {0, 0, ___mp0_getp};
633 #endif
635 #endif /* FreeBSD_Bus_Dma_Abstraction */
638 * Actual memory allocation routine for non-DMAed memory.
640 static void *sym_calloc(int size, char *name)
642 void *m;
643 /* Lock */
644 m = __sym_calloc(&mp0, size, name);
645 /* Unlock */
646 return m;
650 * Actual memory free routine for non-DMAed memory.
652 static void sym_mfree(void *ptr, int size, char *name)
654 /* Lock */
655 __sym_mfree(&mp0, ptr, size, name);
656 /* Unlock */
660 * DMAable pools.
662 #ifndef FreeBSD_Bus_Dma_Abstraction
664 * Without `bus dma abstraction', all the memory is DMAable, and
665 * only a single pool is needed (vtophys() is our friend).
667 #define __sym_calloc_dma(b, s, n) sym_calloc(s, n)
668 #define __sym_mfree_dma(b, p, s, n) sym_mfree(p, s, n)
669 #define __vtobus(b, p) vtophys(p)
671 #else
673 * With `bus dma abstraction', we use a separate pool per parent
674 * BUS handle. A reverse table (hashed) is maintained for virtual
675 * to BUS address translation.
677 static void getbaddrcb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
679 bus_addr_t *baddr;
680 baddr = (bus_addr_t *)arg;
681 *baddr = segs->ds_addr;
684 static m_addr_t ___dma_getp(m_pool_s *mp)
686 m_vtob_s *vbp;
687 void *vaddr = 0;
688 bus_addr_t baddr = 0;
690 vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB");
691 if (!vbp)
692 goto out_err;
694 if (bus_dmamem_alloc(mp->dmat, &vaddr,
695 BUS_DMA_NOWAIT, &vbp->dmamap))
696 goto out_err;
697 bus_dmamap_load(mp->dmat, vbp->dmamap, vaddr,
698 MEMO_CLUSTER_SIZE, getbaddrcb, &baddr, 0);
699 if (baddr) {
700 int hc = VTOB_HASH_CODE(vaddr);
701 vbp->vaddr = (m_addr_t) vaddr;
702 vbp->baddr = (m_addr_t) baddr;
703 vbp->next = mp->vtob[hc];
704 mp->vtob[hc] = vbp;
705 ++mp->nump;
706 return (m_addr_t) vaddr;
708 out_err:
709 if (baddr)
710 bus_dmamap_unload(mp->dmat, vbp->dmamap);
711 if (vaddr)
712 bus_dmamem_free(mp->dmat, vaddr, vbp->dmamap);
713 if (vbp->dmamap)
714 bus_dmamap_destroy(mp->dmat, vbp->dmamap);
715 if (vbp)
716 __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
717 return 0;
720 #ifdef MEMO_FREE_UNUSED
721 static void ___dma_freep(m_pool_s *mp, m_addr_t m)
723 m_vtob_s **vbpp, *vbp;
724 int hc = VTOB_HASH_CODE(m);
726 vbpp = &mp->vtob[hc];
727 while (*vbpp && (*vbpp)->vaddr != m)
728 vbpp = &(*vbpp)->next;
729 if (*vbpp) {
730 vbp = *vbpp;
731 *vbpp = (*vbpp)->next;
732 bus_dmamap_unload(mp->dmat, vbp->dmamap);
733 bus_dmamem_free(mp->dmat, (void *) vbp->vaddr, vbp->dmamap);
734 bus_dmamap_destroy(mp->dmat, vbp->dmamap);
735 __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
736 --mp->nump;
739 #endif
741 static __inline m_pool_s *___get_dma_pool(bus_dma_tag_t dev_dmat)
743 m_pool_s *mp;
744 for (mp = mp0.next; mp && mp->dev_dmat != dev_dmat; mp = mp->next);
745 return mp;
748 static m_pool_s *___cre_dma_pool(bus_dma_tag_t dev_dmat)
750 m_pool_s *mp = 0;
752 mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL");
753 if (mp) {
754 mp->dev_dmat = dev_dmat;
755 if (!bus_dma_tag_create(dev_dmat, 1, MEMO_CLUSTER_SIZE,
756 BUS_SPACE_MAXADDR_32BIT,
757 BUS_SPACE_MAXADDR_32BIT,
758 NULL, NULL, MEMO_CLUSTER_SIZE, 1,
759 MEMO_CLUSTER_SIZE, 0, &mp->dmat)) {
760 mp->getp = ___dma_getp;
761 #ifdef MEMO_FREE_UNUSED
762 mp->freep = ___dma_freep;
763 #endif
764 mp->next = mp0.next;
765 mp0.next = mp;
766 return mp;
769 if (mp)
770 __sym_mfree(&mp0, mp, sizeof(*mp), "MPOOL");
771 return 0;
774 #ifdef MEMO_FREE_UNUSED
775 static void ___del_dma_pool(m_pool_s *p)
777 struct m_pool **pp = &mp0.next;
779 while (*pp && *pp != p)
780 pp = &(*pp)->next;
781 if (*pp) {
782 *pp = (*pp)->next;
783 bus_dma_tag_destroy(p->dmat);
784 __sym_mfree(&mp0, p, sizeof(*p), "MPOOL");
787 #endif
789 static void *__sym_calloc_dma(bus_dma_tag_t dev_dmat, int size, char *name)
791 struct m_pool *mp;
792 void *m = 0;
794 /* Lock */
795 mp = ___get_dma_pool(dev_dmat);
796 if (!mp)
797 mp = ___cre_dma_pool(dev_dmat);
798 if (mp)
799 m = __sym_calloc(mp, size, name);
800 #ifdef MEMO_FREE_UNUSED
801 if (mp && !mp->nump)
802 ___del_dma_pool(mp);
803 #endif
804 /* Unlock */
806 return m;
809 static void
810 __sym_mfree_dma(bus_dma_tag_t dev_dmat, void *m, int size, char *name)
812 struct m_pool *mp;
814 /* Lock */
815 mp = ___get_dma_pool(dev_dmat);
816 if (mp)
817 __sym_mfree(mp, m, size, name);
818 #ifdef MEMO_FREE_UNUSED
819 if (mp && !mp->nump)
820 ___del_dma_pool(mp);
821 #endif
822 /* Unlock */
825 static m_addr_t __vtobus(bus_dma_tag_t dev_dmat, void *m)
827 m_pool_s *mp;
828 int hc = VTOB_HASH_CODE(m);
829 m_vtob_s *vp = 0;
830 m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
832 /* Lock */
833 mp = ___get_dma_pool(dev_dmat);
834 if (mp) {
835 vp = mp->vtob[hc];
836 while (vp && (m_addr_t) vp->vaddr != a)
837 vp = vp->next;
839 /* Unlock */
840 if (!vp)
841 panic("sym: VTOBUS FAILED!\n");
842 return vp ? vp->baddr + (((m_addr_t) m) - a) : 0;
845 #endif /* FreeBSD_Bus_Dma_Abstraction */
848 * Verbs for DMAable memory handling.
849 * The _uvptv_ macro avoids a nasty warning about pointer to volatile
850 * being discarded.
852 #define _uvptv_(p) ((void *)((vm_offset_t)(p)))
853 #define _sym_calloc_dma(np, s, n) __sym_calloc_dma(np->bus_dmat, s, n)
854 #define _sym_mfree_dma(np, p, s, n) \
855 __sym_mfree_dma(np->bus_dmat, _uvptv_(p), s, n)
856 #define sym_calloc_dma(s, n) _sym_calloc_dma(np, s, n)
857 #define sym_mfree_dma(p, s, n) _sym_mfree_dma(np, p, s, n)
858 #define _vtobus(np, p) __vtobus(np->bus_dmat, _uvptv_(p))
859 #define vtobus(p) _vtobus(np, p)
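/*
 *  Note that the short forms above expand the local variable 'np'
 *  (the host control block), so they can only be used where such
 *  a variable is in scope. A typical (illustrative) pattern:
 *
 *	cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB");
 *	cp->ccb_ba = vtobus(cp);
 */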
863 * Print a buffer in hexadecimal format.
865 static void sym_printb_hex (u_char *p, int n)
867 while (n-- > 0)
868 kprintf (" %x", *p++);
872 * Same with a label at beginning and .\n at end.
874 static void sym_printl_hex (char *label, u_char *p, int n)
876 kprintf ("%s", label);
877 sym_printb_hex (p, n);
878 kprintf (".\n");
882 * Return a string for SCSI BUS mode.
884 static char *sym_scsi_bus_mode(int mode)
886 switch(mode) {
887 case SMODE_HVD: return "HVD";
888 case SMODE_SE: return "SE";
889 case SMODE_LVD: return "LVD";
891 return "??";
895 * Some poor and bogus sync table that refers to Tekram NVRAM layout.
897 #ifdef SYM_CONF_NVRAM_SUPPORT
898 static u_char Tekram_sync[16] =
899 {25,31,37,43, 50,62,75,125, 12,15,18,21, 6,7,9,10};
900 #endif
903 * Union of supported NVRAM formats.
905 struct sym_nvram {
906 int type;
907 #define SYM_SYMBIOS_NVRAM (1)
908 #define SYM_TEKRAM_NVRAM (2)
909 #ifdef SYM_CONF_NVRAM_SUPPORT
910 union {
911 Symbios_nvram Symbios;
912 Tekram_nvram Tekram;
913 } data;
914 #endif
918 * This one is hopefully useless, but actually useful. :-)
920 #ifndef assert
921 #define assert(expression) { \
922 if (!(expression)) { \
923 (void)panic( \
924 "assertion \"%s\" failed: file \"%s\", line %d\n", \
925 #expression, \
926 __FILE__, __LINE__); \
929 #endif
932 * Some provision for a possible big endian mode supported by
933 * Symbios chips (never seen, by the way).
934 * For now, this stuff does not deserve any comments. :)
937 #define sym_offb(o) (o)
938 #define sym_offw(o) (o)
941 * Some provision for BIG ENDIAN CPU support.
942 * Btw, FreeBSD does not seem to be ready yet for big endian.
945 #if BYTE_ORDER == BIG_ENDIAN
946 #define cpu_to_scr(dw) _htole32(dw)
947 #define scr_to_cpu(dw) _le32toh(dw)
948 #else
949 #define cpu_to_scr(dw) (dw)
950 #define scr_to_cpu(dw) (dw)
951 #endif
954 * Access to the chip IO registers and on-chip RAM.
955 * We use the `bus space' interface under FreeBSD-4 and
956 * later kernel versions.
959 #ifdef FreeBSD_Bus_Space_Abstraction
961 #if defined(SYM_CONF_IOMAPPED)
963 #define INB_OFF(o) bus_space_read_1(np->io_tag, np->io_bsh, o)
964 #define INW_OFF(o) bus_space_read_2(np->io_tag, np->io_bsh, o)
965 #define INL_OFF(o) bus_space_read_4(np->io_tag, np->io_bsh, o)
967 #define OUTB_OFF(o, v) bus_space_write_1(np->io_tag, np->io_bsh, o, (v))
968 #define OUTW_OFF(o, v) bus_space_write_2(np->io_tag, np->io_bsh, o, (v))
969 #define OUTL_OFF(o, v) bus_space_write_4(np->io_tag, np->io_bsh, o, (v))
971 #else /* Memory mapped IO */
973 #define INB_OFF(o) bus_space_read_1(np->mmio_tag, np->mmio_bsh, o)
974 #define INW_OFF(o) bus_space_read_2(np->mmio_tag, np->mmio_bsh, o)
975 #define INL_OFF(o) bus_space_read_4(np->mmio_tag, np->mmio_bsh, o)
977 #define OUTB_OFF(o, v) bus_space_write_1(np->mmio_tag, np->mmio_bsh, o, (v))
978 #define OUTW_OFF(o, v) bus_space_write_2(np->mmio_tag, np->mmio_bsh, o, (v))
979 #define OUTL_OFF(o, v) bus_space_write_4(np->mmio_tag, np->mmio_bsh, o, (v))
981 #endif /* SYM_CONF_IOMAPPED */
983 #define OUTRAM_OFF(o, a, l) \
984 bus_space_write_region_1(np->ram_tag, np->ram_bsh, o, (a), (l))
986 #else /* not defined FreeBSD_Bus_Space_Abstraction */
988 #if BYTE_ORDER == BIG_ENDIAN
989 #error "BIG ENDIAN support requires bus space kernel interface"
990 #endif
993 * Access to the chip IO registers and on-chip RAM.
994 * We use legacy MMIO and IO interface for FreeBSD 3.X versions.
998 * Define some understandable verbs for IO and MMIO.
1000 #define io_read8(p) scr_to_cpu(inb((p)))
1001 #define io_read16(p) scr_to_cpu(inw((p)))
1002 #define io_read32(p) scr_to_cpu(inl((p)))
1003 #define io_write8(p, v) outb((p), cpu_to_scr(v))
1004 #define io_write16(p, v) outw((p), cpu_to_scr(v))
1005 #define io_write32(p, v) outl((p), cpu_to_scr(v))
1007 #define mmio_read8(a) scr_to_cpu((*(volatile unsigned char *) (a)))
1008 #define mmio_read16(a) scr_to_cpu((*(volatile unsigned short *) (a)))
1009 #define mmio_read32(a) scr_to_cpu((*(volatile unsigned int *) (a)))
1010 #define mmio_write8(a, b) (*(volatile unsigned char *) (a)) = cpu_to_scr(b)
1011 #define mmio_write16(a, b) (*(volatile unsigned short *) (a)) = cpu_to_scr(b)
1012 #define mmio_write32(a, b) (*(volatile unsigned int *) (a)) = cpu_to_scr(b)
1013 #define memcpy_to_pci(d, s, n) bcopy((s), (void *)(d), (n))
1016 * Normal IO
1018 #if defined(SYM_CONF_IOMAPPED)
1020 #define INB_OFF(o) io_read8(np->io_port + sym_offb(o))
1021 #define OUTB_OFF(o, v) io_write8(np->io_port + sym_offb(o), (v))
1023 #define INW_OFF(o) io_read16(np->io_port + sym_offw(o))
1024 #define OUTW_OFF(o, v) io_write16(np->io_port + sym_offw(o), (v))
1026 #define INL_OFF(o) io_read32(np->io_port + (o))
1027 #define OUTL_OFF(o, v) io_write32(np->io_port + (o), (v))
1029 #else /* Memory mapped IO */
1031 #define INB_OFF(o) mmio_read8(np->mmio_va + sym_offb(o))
1032 #define OUTB_OFF(o, v) mmio_write8(np->mmio_va + sym_offb(o), (v))
1034 #define INW_OFF(o) mmio_read16(np->mmio_va + sym_offw(o))
1035 #define OUTW_OFF(o, v) mmio_write16(np->mmio_va + sym_offw(o), (v))
1037 #define INL_OFF(o) mmio_read32(np->mmio_va + (o))
1038 #define OUTL_OFF(o, v) mmio_write32(np->mmio_va + (o), (v))
1040 #endif
1042 #define OUTRAM_OFF(o, a, l) memcpy_to_pci(np->ram_va + (o), (a), (l))
1044 #endif /* FreeBSD_Bus_Space_Abstraction */
1047 * Common definitions for both bus space and legacy IO methods.
1049 #define INB(r) INB_OFF(offsetof(struct sym_reg,r))
1050 #define INW(r) INW_OFF(offsetof(struct sym_reg,r))
1051 #define INL(r) INL_OFF(offsetof(struct sym_reg,r))
1053 #define OUTB(r, v) OUTB_OFF(offsetof(struct sym_reg,r), (v))
1054 #define OUTW(r, v) OUTW_OFF(offsetof(struct sym_reg,r), (v))
1055 #define OUTL(r, v) OUTL_OFF(offsetof(struct sym_reg,r), (v))
1057 #define OUTONB(r, m) OUTB(r, INB(r) | (m))
1058 #define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m))
1059 #define OUTONW(r, m) OUTW(r, INW(r) | (m))
1060 #define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m))
1061 #define OUTONL(r, m) OUTL(r, INL(r) | (m))
1062 #define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m))
1065 * We normally want the chip to have a consistent view
1066 * of driver internal data structures when we restart it.
1067 * Thus these macros.
1069 #define OUTL_DSP(v) \
1070 do { \
1071 MEMORY_BARRIER(); \
1072 OUTL (nc_dsp, (v)); \
1073 } while (0)
1075 #define OUTONB_STD() \
1076 do { \
1077 MEMORY_BARRIER(); \
1078 OUTONB (nc_dcntl, (STD|NOCOM)); \
1079 } while (0)
1082 * Command control block states.
1084 #define HS_IDLE (0)
1085 #define HS_BUSY (1)
1086 #define HS_NEGOTIATE (2) /* sync/wide data transfer*/
1087 #define HS_DISCONNECT (3) /* Disconnected by target */
1088 #define HS_WAIT (4) /* waiting for resource */
1090 #define HS_DONEMASK (0x80)
1091 #define HS_COMPLETE (4|HS_DONEMASK)
1092 #define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */
1093 #define HS_UNEXPECTED (6|HS_DONEMASK) /* Unexpected disconnect */
1094 #define HS_COMP_ERR (7|HS_DONEMASK) /* Completed with error */
1097 * Software Interrupt Codes
1099 #define SIR_BAD_SCSI_STATUS (1)
1100 #define SIR_SEL_ATN_NO_MSG_OUT (2)
1101 #define SIR_MSG_RECEIVED (3)
1102 #define SIR_MSG_WEIRD (4)
1103 #define SIR_NEGO_FAILED (5)
1104 #define SIR_NEGO_PROTO (6)
1105 #define SIR_SCRIPT_STOPPED (7)
1106 #define SIR_REJECT_TO_SEND (8)
1107 #define SIR_SWIDE_OVERRUN (9)
1108 #define SIR_SODL_UNDERRUN (10)
1109 #define SIR_RESEL_NO_MSG_IN (11)
1110 #define SIR_RESEL_NO_IDENTIFY (12)
1111 #define SIR_RESEL_BAD_LUN (13)
1112 #define SIR_TARGET_SELECTED (14)
1113 #define SIR_RESEL_BAD_I_T_L (15)
1114 #define SIR_RESEL_BAD_I_T_L_Q (16)
1115 #define SIR_ABORT_SENT (17)
1116 #define SIR_RESEL_ABORTED (18)
1117 #define SIR_MSG_OUT_DONE (19)
1118 #define SIR_COMPLETE_ERROR (20)
1119 #define SIR_DATA_OVERRUN (21)
1120 #define SIR_BAD_PHASE (22)
1121 #define SIR_MAX (22)
1124 * Extended error bit codes.
1125 * xerr_status field of struct sym_ccb.
1127 #define XE_EXTRA_DATA (1) /* unexpected data phase */
1128 #define XE_BAD_PHASE (1<<1) /* illegal phase (4/5) */
1129 #define XE_PARITY_ERR (1<<2) /* unrecovered SCSI parity error */
1130 #define XE_SODL_UNRUN (1<<3) /* ODD transfer in DATA OUT phase */
1131 #define XE_SWIDE_OVRUN (1<<4) /* ODD transfer in DATA IN phase */
1134 * Negotiation status.
1135 * nego_status field of struct sym_ccb.
1137 #define NS_SYNC (1)
1138 #define NS_WIDE (2)
1139 #define NS_PPR (3)
1142 * A hashed table of CCBs is used to retrieve a CCB address
1143 * from a DSA value.
1145 #define CCB_HASH_SHIFT 8
1146 #define CCB_HASH_SIZE (1UL << CCB_HASH_SHIFT)
1147 #define CCB_HASH_MASK (CCB_HASH_SIZE-1)
1148 #define CCB_HASH_CODE(dsa) (((dsa) >> 9) & CCB_HASH_MASK)
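/*
 *  The DSA register holds the bus address of the CCB being
 *  processed, so bits 9..16 of that address select one of the
 *  np->ccbh[] chains declared in the host control block below.
 */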
1151 * Device flags.
1153 #define SYM_DISC_ENABLED (1)
1154 #define SYM_TAGS_ENABLED (1<<1)
1155 #define SYM_SCAN_BOOT_DISABLED (1<<2)
1156 #define SYM_SCAN_LUNS_DISABLED (1<<3)
1159 * Host adapter miscellaneous flags.
1161 #define SYM_AVOID_BUS_RESET (1)
1162 #define SYM_SCAN_TARGETS_HILO (1<<1)
1165 * Device quirks.
1166 * Some devices, for example the CHEETAH 2 LVD, disconnect without
1167 * saving the DATA POINTER, then reselect and terminate the IO.
1168 * On reselection, the automatic RESTORE DATA POINTER makes the
1169 * CURRENT DATA POINTER not point at the end of the IO.
1170 * This behaviour just breaks our calculation of the residual.
1171 * For now, we just force an AUTO SAVE on disconnection and will
1172 * fix that in a further driver version.
1174 #define SYM_QUIRK_AUTOSAVE 1
1177 * Misc.
1179 #define SYM_SNOOP_TIMEOUT (10000000)
1180 #define SYM_PCI_IO PCIR_MAPS
1181 #define SYM_PCI_MMIO (PCIR_MAPS + 4)
1182 #define SYM_PCI_RAM (PCIR_MAPS + 8)
1183 #define SYM_PCI_RAM64 (PCIR_MAPS + 12)
1186 * Back-pointer from the CAM CCB to our data structures.
1188 #define sym_hcb_ptr spriv_ptr0
1189 /* #define sym_ccb_ptr spriv_ptr1 */
1192 * We mostly have to deal with pointers.
1193 * Thus these typedef's.
1195 typedef struct sym_tcb *tcb_p;
1196 typedef struct sym_lcb *lcb_p;
1197 typedef struct sym_ccb *ccb_p;
1198 typedef struct sym_hcb *hcb_p;
1201 * Gathered values of the negotiable parameters
1203 struct sym_trans {
1204 u8 scsi_version;
1205 u8 spi_version;
1206 u8 period;
1207 u8 offset;
1208 u8 width;
1209 u8 options; /* PPR options */
1212 struct sym_tinfo {
1213 struct sym_trans current;
1214 struct sym_trans goal;
1215 struct sym_trans user;
1218 #define BUS_8_BIT MSG_EXT_WDTR_BUS_8_BIT
1219 #define BUS_16_BIT MSG_EXT_WDTR_BUS_16_BIT
1222 * Global TCB HEADER.
1224 * Due to lack of indirect addressing on earlier NCR chips,
1225 * this substructure is copied from the TCB to a global
1226 * address after selection.
1227 * For SYMBIOS chips that support LOAD/STORE this copy is
1228 * not needed and thus not performed.
1230 struct sym_tcbh {
1232 * Scripts bus addresses of LUN table accessed from scripts.
1233 * LUN #0 is a special case, since multi-lun devices are rare,
1234 * and we want to speed up the general case and not waste
1235 * resources.
1237 u32 luntbl_sa; /* bus address of this table */
1238 u32 lun0_sa; /* bus address of LCB #0 */
1240 * Actual SYNC/WIDE IO register values for this target.
1241 * 'sval', 'wval' and 'uval' are read from SCRIPTS and
1242 * so have alignment constraints.
1244 /*0*/ u_char uval; /* -> SCNTL4 register */
1245 /*1*/ u_char sval; /* -> SXFER io register */
1246 /*2*/ u_char filler1;
1247 /*3*/ u_char wval; /* -> SCNTL3 io register */
1251 * Target Control Block
1253 struct sym_tcb {
1255 * TCB header.
1256 * Assumed at offset 0.
1258 /*0*/ struct sym_tcbh head;
1261 * LUN table used by the SCRIPTS processor.
1262 * An array of bus addresses is used on reselection.
1264 u32 *luntbl; /* LCBs bus address table */
1267 * LUN table used by the C code.
1269 lcb_p lun0p; /* LCB of LUN #0 (usual case) */
1270 #if SYM_CONF_MAX_LUN > 1
1271 lcb_p *lunmp; /* Other LCBs [1..MAX_LUN] */
1272 #endif
1275 * Bitmap that tells about LUNs that succeeded at least
1276 * 1 IO and are therefore assumed to be real devices.
1277 * Avoid useless allocation of the LCB structure.
1279 u32 lun_map[(SYM_CONF_MAX_LUN+31)/32];
1282 * Bitmap that tells about LUNs that do not yet have an LCB
1283 * allocated (not discovered or LCB allocation failed).
1285 u32 busy0_map[(SYM_CONF_MAX_LUN+31)/32];
1288 * Transfer capabilities (SIP)
1290 struct sym_tinfo tinfo;
1293 * Keep track of the CCB used for the negotiation in order
1294 * to ensure that only 1 negotiation is queued at a time.
1296 ccb_p nego_cp; /* CCB used for the nego */
1299 * Set when we want to reset the device.
1301 u_char to_reset;
1304 * Other user settable limits and options.
1305 * These limits are read from the NVRAM if present.
1307 u_char usrflags;
1308 u_short usrtags;
1312 * Global LCB HEADER.
1314 * Due to lack of indirect addressing on earlier NCR chips,
1315 * this substructure is copied from the LCB to a global
1316 * address after selection.
1317 * For SYMBIOS chips that support LOAD/STORE this copy is
1318 * not needed and thus not performed.
1320 struct sym_lcbh {
1322 * SCRIPTS address jumped by SCRIPTS on reselection.
1323 * For logical units that have not been probed, this address points to
1324 * SCRIPTS that deal with bad LU handling (must be at
1325 * offset zero of the LCB for that reason).
1327 /*0*/ u32 resel_sa;
1330 * Task (bus address of a CCB) read from SCRIPTS that points
1331 * to the unique ITL nexus allowed to be disconnected.
1333 u32 itl_task_sa;
1336 * Task table bus address (read from SCRIPTS).
1338 u32 itlq_tbl_sa;
1342 * Logical Unit Control Block
1344 struct sym_lcb {
1346 * LCB header.
1347 * Assumed at offset 0.
1349 /*0*/ struct sym_lcbh head;
1352 * Task table read from SCRIPTS that contains pointers to
1353 * ITLQ nexuses. The bus address read from SCRIPTS is
1354 * inside the header.
1356 u32 *itlq_tbl; /* Kernel virtual address */
1359 * Busy CCBs management.
1361 u_short busy_itlq; /* Number of busy tagged CCBs */
1362 u_short busy_itl; /* Number of busy untagged CCBs */
1365 * Circular tag allocation buffer.
1367 u_short ia_tag; /* Tag allocation index */
1368 u_short if_tag; /* Tag release index */
1369 u_char *cb_tags; /* Circular tags buffer */
1372 * Set when we want to clear all tasks.
1374 u_char to_clear;
1377 * Capabilities.
1379 u_char user_flags;
1380 u_char current_flags;
1384 * Action from SCRIPTS on a task.
1385 * It is part of the CCB, but is also used separately to plug
1386 * in the error handling action to be performed from SCRIPTS.
1388 struct sym_actscr {
1389 u32 start; /* Jumped by SCRIPTS after selection */
1390 u32 restart; /* Jumped by SCRIPTS on reselection */
1394 * Phase mismatch context.
1396 * It is part of the CCB and is used as parameters for the
1397 * DATA pointer. We need two contexts to handle correctly the
1398 * SAVED DATA POINTER.
1400 struct sym_pmc {
1401 struct sym_tblmove sg; /* Updated interrupted SG block */
1402 u32 ret; /* SCRIPT return address */
1406 * LUN control block lookup.
1407 * We use a direct pointer for LUN #0, and a table of
1408 * pointers which is only allocated for devices that support
1409 * LUN(s) > 0.
1411 #if SYM_CONF_MAX_LUN <= 1
1412 #define sym_lp(np, tp, lun) (!lun) ? (tp)->lun0p : 0
1413 #else
1414 #define sym_lp(np, tp, lun) \
1415 (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : 0
1416 #endif
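/*
 *  In other words, sym_lp(np, tp, lun) returns the LCB attached
 *  to (target, lun), or 0 when that logical unit has no LCB yet.
 */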
1419 * Status bytes are used by the host and the script processor.
1421 * The last four bytes (status[4]) are copied to the
1422 * scratchb register (declared as scr0..scr3) just after the
1423 * select/reselect, and copied back just after disconnecting.
1424 * Inside the script the XX_REG are used.
1428 * Last four bytes (script)
1430 #define QU_REG scr0
1431 #define HS_REG scr1
1432 #define HS_PRT nc_scr1
1433 #define SS_REG scr2
1434 #define SS_PRT nc_scr2
1435 #define HF_REG scr3
1436 #define HF_PRT nc_scr3
1439 * Last four bytes (host)
1441 #define actualquirks phys.head.status[0]
1442 #define host_status phys.head.status[1]
1443 #define ssss_status phys.head.status[2]
1444 #define host_flags phys.head.status[3]
1447 * Host flags
1449 #define HF_IN_PM0 1u
1450 #define HF_IN_PM1 (1u<<1)
1451 #define HF_ACT_PM (1u<<2)
1452 #define HF_DP_SAVED (1u<<3)
1453 #define HF_SENSE (1u<<4)
1454 #define HF_EXT_ERR (1u<<5)
1455 #define HF_DATA_IN (1u<<6)
1456 #ifdef SYM_CONF_IARB_SUPPORT
1457 #define HF_HINT_IARB (1u<<7)
1458 #endif
1461 * Global CCB HEADER.
1463 * Due to lack of indirect addressing on earlier NCR chips,
1464 * this substructure is copied from the ccb to a global
1465 * address after selection (or reselection) and copied back
1466 * before disconnect.
1467 * For SYMBIOS chips that support LOAD/STORE this copy is
1468 * not needed and thus not performed.
1471 struct sym_ccbh {
1473 * Start and restart SCRIPTS addresses (must be at 0).
1475 /*0*/ struct sym_actscr go;
1478 * SCRIPTS jump addresses that deal with data pointers.
1479 * 'savep' points to the position in the script responsible
1480 * for the actual transfer of data.
1481 * It's written on reception of a SAVE_DATA_POINTER message.
1483 u32 savep; /* Jump address to saved data pointer */
1484 u32 lastp; /* SCRIPTS address at end of data */
1485 u32 goalp; /* Not accessed for now from SCRIPTS */
1488 * Status fields.
1490 u8 status[4];
1494 * Data Structure Block
1496 * During execution of a ccb by the script processor, the
1497 * DSA (data structure address) register points to this
1498 * substructure of the ccb.
1500 struct sym_dsb {
1502 * CCB header.
1503 * Also assumed at offset 0 of the sym_ccb structure.
1505 /*0*/ struct sym_ccbh head;
1508 * Phase mismatch contexts.
1509 * We need two to handle correctly the SAVED DATA POINTER.
1510 * MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic
1511 * for address calculation from SCRIPTS.
1513 struct sym_pmc pm0;
1514 struct sym_pmc pm1;
1517 * Table data for Script
1519 struct sym_tblsel select;
1520 struct sym_tblmove smsg;
1521 struct sym_tblmove smsg_ext;
1522 struct sym_tblmove cmd;
1523 struct sym_tblmove sense;
1524 struct sym_tblmove wresid;
1525 struct sym_tblmove data [SYM_CONF_MAX_SG];
1529 * Our Command Control Block
1531 struct sym_ccb {
1533 * This is the data structure which is pointed to by the DSA
1534 * register while the CCB is executed by the script processor.
1535 * It must be the first entry.
1537 struct sym_dsb phys;
1540 * Pointer to CAM ccb and related stuff.
1542 union ccb *cam_ccb; /* CAM scsiio ccb */
1543 u8 cdb_buf[16]; /* Copy of CDB */
1544 u8 *sns_bbuf; /* Bounce buffer for sense data */
1545 #define SYM_SNS_BBUF_LEN sizeof(struct scsi_sense_data)
1546 int data_len; /* Total data length */
1547 int segments; /* Number of SG segments */
1550 * Miscellaneous status fields.
1552 u_char nego_status; /* Negotiation status */
1553 u_char xerr_status; /* Extended error flags */
1554 u32 extra_bytes; /* Extraneous bytes transferred */
1557 * Message areas.
1558 * We prepare a message to be sent after selection.
1559 * We may use a second one if the command is rescheduled
1560 * due to CHECK_CONDITION or COMMAND TERMINATED.
1561 * Contents are IDENTIFY and SIMPLE_TAG.
1562 * While negotiating sync or wide transfer,
1563 * a SDTR or WDTR message is appended.
1565 u_char scsi_smsg [12];
1566 u_char scsi_smsg2[12];
1569 * Auto request sense related fields.
1571 u_char sensecmd[6]; /* Request Sense command */
1572 u_char sv_scsi_status; /* Saved SCSI status */
1573 u_char sv_xerr_status; /* Saved extended status */
1574 int sv_resid; /* Saved residual */
1577 * Map for the DMA of user data.
1579 #ifdef FreeBSD_Bus_Dma_Abstraction
1580 void *arg; /* Argument for some callback */
1581 bus_dmamap_t dmamap; /* DMA map for user data */
1582 u_char dmamapped;
1583 #define SYM_DMA_NONE 0
1584 #define SYM_DMA_READ 1
1585 #define SYM_DMA_WRITE 2
1586 #endif
1588 * Other fields.
1590 u32 ccb_ba; /* BUS address of this CCB */
1591 u_short tag; /* Tag for this transfer */
1592 /* NO_TAG means no tag */
1593 u_char target;
1594 u_char lun;
1595 ccb_p link_ccbh; /* Host adapter CCB hash chain */
1596 SYM_QUEHEAD
1597 link_ccbq; /* Link to free/busy CCB queue */
1598 u32 startp; /* Initial data pointer */
1599 int ext_sg; /* Extreme data pointer, used */
1600 int ext_ofs; /* to calculate the residual. */
1601 u_char to_abort; /* Want this IO to be aborted */
1604 #define CCB_BA(cp,lbl) (cp->ccb_ba + offsetof(struct sym_ccb, lbl))
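/*
 *  For instance, CCB_BA(cp, phys.pm0) is the bus address of the
 *  pm0 phase mismatch context of CCB 'cp', in a form the SCRIPTS
 *  processor can use directly.
 */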
1607 * Host Control Block
1609 struct sym_hcb {
1611 * Global headers.
1612 * Due to the poor addressing capabilities of earlier
1613 * chips (810, 815, 825) copy part of the data structures
1614 * (CCB, TCB and LCB) in fixed areas.
1616 #ifdef SYM_CONF_GENERIC_SUPPORT
1617 struct sym_ccbh ccb_head;
1618 struct sym_tcbh tcb_head;
1619 struct sym_lcbh lcb_head;
1620 #endif
1622 * Idle task and invalid task actions and
1623 * their bus addresses.
1625 struct sym_actscr idletask, notask, bad_itl, bad_itlq;
1626 vm_offset_t idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba;
1629 * Dummy lun table to protect us against target
1630 * returning bad lun number on reselection.
1632 u32 *badluntbl; /* Table physical address */
1633 u32 badlun_sa; /* SCRIPT handler BUS address */
1636 * Bus address of this host control block.
1638 u32 hcb_ba;
1641 * Bit 32-63 of the on-chip RAM bus address in LE format.
1642 * The START_RAM64 script loads the MMRS and MMWS from this
1643 * field.
1645 u32 scr_ram_seg;
1648 * Chip and controller identification.
1650 #ifdef FreeBSD_Bus_Io_Abstraction
1651 device_t device;
1652 #else
1653 pcici_t pci_tag;
1654 #endif
1655 int unit;
1656 char inst_name[8];
1659 * Initial value of some IO register bits.
1660 * These values are assumed to have been set by BIOS, and may
1661 * be used to probe adapter implementation differences.
1663 u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
1664 sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4,
1665 sv_stest1;
1668 * Actual initial value of IO register bits used by the
1669 * driver. They are loaded at initialisation according to
1670 * features that are to be enabled/disabled.
1672 u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
1673 rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;
1676 * Target data.
1678 struct sym_tcb target[SYM_CONF_MAX_TARGET];
1681 * Target control block bus address array used by the SCRIPT
1682 * on reselection.
1684 u32 *targtbl;
1685 u32 targtbl_ba;
1688 * CAM SIM information for this instance.
1690 struct cam_sim *sim;
1691 struct cam_path *path;
1694 * Allocated hardware resources.
1696 #ifdef FreeBSD_Bus_Io_Abstraction
1697 struct resource *irq_res;
1698 struct resource *io_res;
1699 struct resource *mmio_res;
1700 struct resource *ram_res;
1701 int ram_id;
1702 void *intr;
1703 #endif
1706 * Bus stuff.
1708 * My understanding of PCI is that all agents must share the
1709 * same addressing range and model.
1710 * But some hardware architecture guys provide complex and
1711 * brain-dead stuff that breaks this.
1712 * This driver only supports PCI compliant implementations and
1713 * deals with part of the BUS stuff complexity only to fit O/S
1714 * requirements.
1716 #ifdef FreeBSD_Bus_Io_Abstraction
1717 bus_space_handle_t io_bsh;
1718 bus_space_tag_t io_tag;
1719 bus_space_handle_t mmio_bsh;
1720 bus_space_tag_t mmio_tag;
1721 bus_space_handle_t ram_bsh;
1722 bus_space_tag_t ram_tag;
1723 #endif
1726 * DMA stuff.
1728 #ifdef FreeBSD_Bus_Dma_Abstraction
1729 bus_dma_tag_t bus_dmat; /* DMA tag from parent BUS */
1730 bus_dma_tag_t data_dmat; /* DMA tag for user data */
1731 #endif
1733 * Virtual and physical bus addresses of the chip.
1735 vm_offset_t mmio_va; /* MMIO kernel virtual address */
1736 vm_offset_t mmio_pa; /* MMIO CPU physical address */
1737 vm_offset_t mmio_ba; /* MMIO BUS address */
1738 int mmio_ws; /* MMIO Window size */
1740 vm_offset_t ram_va; /* RAM kernel virtual address */
1741 vm_offset_t ram_pa; /* RAM CPU physical address */
1742 vm_offset_t ram_ba; /* RAM BUS address */
1743 int ram_ws; /* RAM window size */
1744 u32 io_port; /* IO port address */
1747 * SCRIPTS virtual and physical bus addresses.
1748 * 'script' is loaded in the on-chip RAM if present.
1749 * 'scripth' stays in main memory for all chips except the
1750 * 53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM.
1752 u_char *scripta0; /* Copies of script and scripth */
1753 u_char *scriptb0; /* Copies of script and scripth */
1754 vm_offset_t scripta_ba; /* Actual script and scripth */
1755 vm_offset_t scriptb_ba; /* bus addresses. */
1756 vm_offset_t scriptb0_ba;
1757 u_short scripta_sz; /* Actual size of script A */
1758 u_short scriptb_sz; /* Actual size of script B */
1761 * Bus addresses, setup and patch methods for
1762 * the selected firmware.
1764 struct sym_fwa_ba fwa_bas; /* Useful SCRIPTA bus addresses */
1765 struct sym_fwb_ba fwb_bas; /* Useful SCRIPTB bus addresses */
1766 void (*fw_setup)(hcb_p np, struct sym_fw *fw);
1767 void (*fw_patch)(hcb_p np);
1768 char *fw_name;
1771 * General controller parameters and configuration.
1773 u_short device_id; /* PCI device id */
1774 u_char revision_id; /* PCI device revision id */
1775 u_int features; /* Chip features map */
1776 u_char myaddr; /* SCSI id of the adapter */
1777 u_char maxburst; /* log base 2 of dwords burst */
1778 u_char maxwide; /* Maximum transfer width */
1779 u_char minsync; /* Min sync period factor (ST) */
1780 u_char maxsync; /* Max sync period factor (ST) */
1781 u_char maxoffs; /* Max scsi offset (ST) */
1782 u_char minsync_dt; /* Min sync period factor (DT) */
1783 u_char maxsync_dt; /* Max sync period factor (DT) */
1784 u_char maxoffs_dt; /* Max scsi offset (DT) */
1785 u_char multiplier; /* Clock multiplier (1,2,4) */
1786 u_char clock_divn; /* Number of clock divisors */
1787 u32 clock_khz; /* SCSI clock frequency in KHz */
1788 u32 pciclk_khz; /* Estimated PCI clock in KHz */
1790 * Start queue management.
1791 * It is filled up by the host processor and accessed by the
1792 * SCRIPTS processor in order to start SCSI commands.
1794 volatile /* Prevent code optimizations */
1795 u32 *squeue; /* Start queue virtual address */
1796 u32 squeue_ba; /* Start queue BUS address */
1797 u_short squeueput; /* Next free slot of the queue */
1798 u_short actccbs; /* Number of allocated CCBs */
1801 * Command completion queue.
1802 * It is the same size as the start queue to avoid overflow.
1804 u_short dqueueget; /* Next position to scan */
1805 volatile /* Prevent code optimizations */
1806 u32 *dqueue; /* Completion (done) queue */
1807 u32 dqueue_ba; /* Done queue BUS address */
1810 * Miscellaneous buffers accessed by the scripts-processor.
1811 * They shall be DWORD aligned, because they may be read or
1812 * written with a script command.
1814 u_char msgout[8]; /* Buffer for MESSAGE OUT */
1815 u_char msgin [8]; /* Buffer for MESSAGE IN */
1816 u32 lastmsg; /* Last SCSI message sent */
1817 u_char scratch; /* Scratch for SCSI receive */
1820 * Miscellaneous configuration and status parameters.
1822 u_char usrflags; /* Miscellaneous user flags */
1823 u_char scsi_mode; /* Current SCSI BUS mode */
1824 u_char verbose; /* Verbosity for this controller*/
1825 u32 cache; /* Used for cache test at init. */
1828 * CCB lists and queue.
1830 ccb_p ccbh[CCB_HASH_SIZE]; /* CCB hashed by DSA value */
1831 SYM_QUEHEAD free_ccbq; /* Queue of available CCBs */
1832 SYM_QUEHEAD busy_ccbq; /* Queue of busy CCBs */
1835 * During error handling and/or recovery,
1836 * active CCBs that are to be completed with
1837 * error or requeued are moved from the busy_ccbq
1838 * to the comp_ccbq prior to completion.
1840 SYM_QUEHEAD comp_ccbq;
1843 * CAM CCB pending queue.
1845 SYM_QUEHEAD cam_ccbq;
1848 * IMMEDIATE ARBITRATION (IARB) control.
1850 * We keep track in 'last_cp' of the last CCB that has been
1851 * queued to the SCRIPTS processor and clear 'last_cp' when
1852 * this CCB completes. If last_cp is not zero at the moment
1853 * we queue a new CCB, we set a flag in 'last_cp' that is
1854 * used by the SCRIPTS as a hint for setting IARB.
1855 * We do not set more than 'iarb_max' consecutive hints for
1856 * IARB in order to leave devices a chance to reselect.
1857 * By the way, any non zero value of 'iarb_max' is unfair. :)
1859 #ifdef SYM_CONF_IARB_SUPPORT
1860 u_short iarb_max; /* Max. # consecutive IARB hints*/
1861 u_short iarb_count; /* Actual # of these hints */
1862 ccb_p last_cp;
1863 #endif
1866 * Command abort handling.
1867 * We need to synchronize tightly with the SCRIPTS
1868 * processor in order to handle things correctly.
1870 u_char abrt_msg[4]; /* Message to send buffer */
1871 struct sym_tblmove abrt_tbl; /* Table for the MOV of it */
1872 struct sym_tblsel abrt_sel; /* Sync params for selection */
1873 u_char istat_sem; /* Tells the chip to stop (SEM) */
1876 #define HCB_BA(np, lbl) (np->hcb_ba + offsetof(struct sym_hcb, lbl))
1879 * Return the name of the controller.
1881 static __inline char *sym_name(hcb_p np)
1883 return np->inst_name;
1886 /*--------------------------------------------------------------------------*/
1887 /*------------------------------ FIRMWARES ---------------------------------*/
1888 /*--------------------------------------------------------------------------*/
1891 * This stuff will be moved to a separate source file when
1892 * the driver is broken into several source modules.
1896 * Macros used for all firmwares.
1898 #define SYM_GEN_A(s, label) ((short) offsetof(s, label)),
1899 #define SYM_GEN_B(s, label) ((short) offsetof(s, label)),
1900 #define PADDR_A(label) SYM_GEN_PADDR_A(struct SYM_FWA_SCR, label)
1901 #define PADDR_B(label) SYM_GEN_PADDR_B(struct SYM_FWB_SCR, label)
1904 #ifdef SYM_CONF_GENERIC_SUPPORT
1906 * Allocate firmware #1 script area.
1908 #define SYM_FWA_SCR sym_fw1a_scr
1909 #define SYM_FWB_SCR sym_fw1b_scr
1910 #include "sym_fw1.h"
1911 struct sym_fwa_ofs sym_fw1a_ofs = {
1912 SYM_GEN_FW_A(struct SYM_FWA_SCR)
1914 struct sym_fwb_ofs sym_fw1b_ofs = {
1915 SYM_GEN_FW_B(struct SYM_FWB_SCR)
1917 #undef SYM_FWA_SCR
1918 #undef SYM_FWB_SCR
1919 #endif /* SYM_CONF_GENERIC_SUPPORT */
1922 * Allocate firmware #2 script area.
1924 #define SYM_FWA_SCR sym_fw2a_scr
1925 #define SYM_FWB_SCR sym_fw2b_scr
1926 #include "sym_fw2.h"
1927 struct sym_fwa_ofs sym_fw2a_ofs = {
1928 SYM_GEN_FW_A(struct SYM_FWA_SCR)
1930 struct sym_fwb_ofs sym_fw2b_ofs = {
1931 SYM_GEN_FW_B(struct SYM_FWB_SCR)
1932 SYM_GEN_B(struct SYM_FWB_SCR, start64)
1933 SYM_GEN_B(struct SYM_FWB_SCR, pm_handle)
1935 #undef SYM_FWA_SCR
1936 #undef SYM_FWB_SCR
1938 #undef SYM_GEN_A
1939 #undef SYM_GEN_B
1940 #undef PADDR_A
1941 #undef PADDR_B
1943 #ifdef SYM_CONF_GENERIC_SUPPORT
1945 * Patch routine for firmware #1.
1947 static void
1948 sym_fw1_patch(hcb_p np)
1950 struct sym_fw1a_scr *scripta0;
1951 struct sym_fw1b_scr *scriptb0;
1953 scripta0 = (struct sym_fw1a_scr *) np->scripta0;
1954 scriptb0 = (struct sym_fw1b_scr *) np->scriptb0;
1957 * Remove LED support if not needed.
1959 if (!(np->features & FE_LED0)) {
1960 scripta0->idle[0] = cpu_to_scr(SCR_NO_OP);
1961 scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP);
1962 scripta0->start[0] = cpu_to_scr(SCR_NO_OP);
1965 #ifdef SYM_CONF_IARB_SUPPORT
1967 * If user does not want to use IMMEDIATE ARBITRATION
1968 * when we are reselected while attempting to arbitrate,
1969 * patch the SCRIPTS accordingly with a SCRIPT NO_OP.
1971 if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
1972 scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
1973 #endif
1975 * Patch some data in SCRIPTS.
1976 * - start and done queue initial bus address.
1977 * - target bus address table bus address.
1979 scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba);
1980 scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba);
1981 scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba);
1983 #endif /* SYM_CONF_GENERIC_SUPPORT */
1986 * Patch routine for firmware #2.
1988 static void
1989 sym_fw2_patch(hcb_p np)
1991 struct sym_fw2a_scr *scripta0;
1992 struct sym_fw2b_scr *scriptb0;
1994 scripta0 = (struct sym_fw2a_scr *) np->scripta0;
1995 scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;
1998 * Remove LED support if not needed.
2000 if (!(np->features & FE_LED0)) {
2001 scripta0->idle[0] = cpu_to_scr(SCR_NO_OP);
2002 scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP);
2003 scripta0->start[0] = cpu_to_scr(SCR_NO_OP);
2006 #ifdef SYM_CONF_IARB_SUPPORT
2008 * If user does not want to use IMMEDIATE ARBITRATION
2009 * when we are reselected while attempting to arbitrate,
2010 * patch the SCRIPTS accordingly with a SCRIPT NO_OP.
2012 if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
2013 scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
2014 #endif
2016 * Patch some variables in SCRIPTS.
2017 * - start and done queue initial bus address.
2018 * - target bus address table bus address.
2020 scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba);
2021 scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba);
2022 scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba);
2025 * Remove the load of SCNTL4 on reselection if not a C10.
2027 if (!(np->features & FE_C10)) {
2028 scripta0->resel_scntl4[0] = cpu_to_scr(SCR_NO_OP);
2029 scripta0->resel_scntl4[1] = cpu_to_scr(0);
2033 * Remove a couple of work-arounds specific to C1010 if
2034 * they are not desirable. See `sym_fw2.h' for more details.
2036 if (!(np->device_id == PCI_ID_LSI53C1010_2 &&
2037 np->revision_id < 0x1 &&
2038 np->pciclk_khz < 60000)) {
2039 scripta0->datao_phase[0] = cpu_to_scr(SCR_NO_OP);
2040 scripta0->datao_phase[1] = cpu_to_scr(0);
2042 if (!(np->device_id == PCI_ID_LSI53C1010 &&
2043 /* np->revision_id < 0xff */ 1)) {
2044 scripta0->sel_done[0] = cpu_to_scr(SCR_NO_OP);
2045 scripta0->sel_done[1] = cpu_to_scr(0);
2049 * Patch some other variables in SCRIPTS.
2050 * These ones are loaded by the SCRIPTS processor.
2052 scriptb0->pm0_data_addr[0] =
2053 cpu_to_scr(np->scripta_ba +
2054 offsetof(struct sym_fw2a_scr, pm0_data));
2055 scriptb0->pm1_data_addr[0] =
2056 cpu_to_scr(np->scripta_ba +
2057 offsetof(struct sym_fw2a_scr, pm1_data));
2061 * Fill the data area in scripts.
2062 * To be done for all firmwares.
2064 static void
2065 sym_fw_fill_data (u32 *in, u32 *out)
2067 int i;
2069 for (i = 0; i < SYM_CONF_MAX_SG; i++) {
2070 *in++ = SCR_CHMOV_TBL ^ SCR_DATA_IN;
2071 *in++ = offsetof (struct sym_dsb, data[i]);
2072 *out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT;
2073 *out++ = offsetof (struct sym_dsb, data[i]);
2078 * Setup useful script bus addresses.
2079 * To be done for all firmwares.
2081 static void
2082 sym_fw_setup_bus_addresses(hcb_p np, struct sym_fw *fw)
2084 u32 *pa;
2085 u_short *po;
2086 int i;
2089 * Build the bus address table for script A
2090 * from the script A offset table.
2092 po = (u_short *) fw->a_ofs;
2093 pa = (u32 *) &np->fwa_bas;
2094 for (i = 0 ; i < sizeof(np->fwa_bas)/sizeof(u32) ; i++)
2095 pa[i] = np->scripta_ba + po[i];
2098 * Same for script B.
2100 po = (u_short *) fw->b_ofs;
2101 pa = (u32 *) &np->fwb_bas;
2102 for (i = 0 ; i < sizeof(np->fwb_bas)/sizeof(u32) ; i++)
2103 pa[i] = np->scriptb_ba + po[i];
2106 #ifdef SYM_CONF_GENERIC_SUPPORT
2108 * Setup routine for firmware #1.
2110 static void
2111 sym_fw1_setup(hcb_p np, struct sym_fw *fw)
2113 struct sym_fw1a_scr *scripta0;
2114 struct sym_fw1b_scr *scriptb0;
2116 scripta0 = (struct sym_fw1a_scr *) np->scripta0;
2117 scriptb0 = (struct sym_fw1b_scr *) np->scriptb0;
2120 * Fill variable parts in scripts.
2122 sym_fw_fill_data(scripta0->data_in, scripta0->data_out);
2125 * Set up bus addresses used from the C code.
2127 sym_fw_setup_bus_addresses(np, fw);
2129 #endif /* SYM_CONF_GENERIC_SUPPORT */
2132 * Setup routine for firmware #2.
2134 static void
2135 sym_fw2_setup(hcb_p np, struct sym_fw *fw)
2137 struct sym_fw2a_scr *scripta0;
2138 struct sym_fw2b_scr *scriptb0;
2140 scripta0 = (struct sym_fw2a_scr *) np->scripta0;
2141 scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;
2144 * Fill variable parts in scripts.
2146 sym_fw_fill_data(scripta0->data_in, scripta0->data_out);
2149 * Set up bus addresses used from the C code.
2151 sym_fw_setup_bus_addresses(np, fw);
2155 * Allocate firmware descriptors.
2157 #ifdef SYM_CONF_GENERIC_SUPPORT
2158 static struct sym_fw sym_fw1 = SYM_FW_ENTRY(sym_fw1, "NCR-generic");
2159 #endif /* SYM_CONF_GENERIC_SUPPORT */
2160 static struct sym_fw sym_fw2 = SYM_FW_ENTRY(sym_fw2, "LOAD/STORE-based");
2163 * Find the most appropriate firmware for a chip.
2165 static struct sym_fw *
2166 sym_find_firmware(struct sym_pci_chip *chip)
2168 if (chip->features & FE_LDSTR)
2169 return &sym_fw2;
2170 #ifdef SYM_CONF_GENERIC_SUPPORT
2171 else if (!(chip->features & (FE_PFEN|FE_NOPM|FE_DAC)))
2172 return &sym_fw1;
2173 #endif
2174 else
2175 return 0;
2179 * Bind a script to physical addresses.
2181 static void sym_fw_bind_script (hcb_p np, u32 *start, int len)
2183 u32 opcode, new, old, tmp1, tmp2;
2184 u32 *end, *cur;
2185 int relocs;
2187 cur = start;
2188 end = start + len/4;
2190 while (cur < end) {
2192 opcode = *cur;
2195 * If we forget to change the length
2196 * in scripts, a field will be
2197 * padded with 0. This is an illegal
2198 * command.
2200 if (opcode == 0) {
2201 kprintf ("%s: ERROR0 IN SCRIPT at %d.\n",
2202 sym_name(np), (int) (cur-start));
2203 MDELAY (10000);
2204 ++cur;
2205 continue;
2209 * We use the bogus value 0xf00ff00f ;-)
2210 * to reserve data area in SCRIPTS.
2212 if (opcode == SCR_DATA_ZERO) {
2213 *cur++ = 0;
2214 continue;
2217 if (DEBUG_FLAGS & DEBUG_SCRIPT)
2218 kprintf ("%d: <%x>\n", (int) (cur-start),
2219 (unsigned)opcode);
2222 * We don't have to decode ALL commands
2224 switch (opcode >> 28) {
2225 case 0xf:
2227 * LOAD / STORE DSA relative, don't relocate.
2229 relocs = 0;
2230 break;
2231 case 0xe:
2233 * LOAD / STORE absolute.
2235 relocs = 1;
2236 break;
2237 case 0xc:
2239 * COPY has TWO arguments.
2241 relocs = 2;
2242 tmp1 = cur[1];
2243 tmp2 = cur[2];
2244 if ((tmp1 ^ tmp2) & 3) {
2245 kprintf ("%s: ERROR1 IN SCRIPT at %d.\n",
2246 sym_name(np), (int) (cur-start));
2247 MDELAY (10000);
2250 * If PREFETCH feature not enabled, remove
2251 * the NO FLUSH bit if present.
2253 if ((opcode & SCR_NO_FLUSH) &&
2254 !(np->features & FE_PFEN)) {
2255 opcode = (opcode & ~SCR_NO_FLUSH);
2257 break;
2258 case 0x0:
2260 * MOVE/CHMOV (absolute address)
2262 if (!(np->features & FE_WIDE))
2263 opcode = (opcode | OPC_MOVE);
2264 relocs = 1;
2265 break;
2266 case 0x1:
2268 * MOVE/CHMOV (table indirect)
2270 if (!(np->features & FE_WIDE))
2271 opcode = (opcode | OPC_MOVE);
2272 relocs = 0;
2273 break;
2274 case 0x8:
2276 * JUMP / CALL
2277 * don't relocate if relative :-)
2279 if (opcode & 0x00800000)
2280 relocs = 0;
2281 else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/
2282 relocs = 2;
2283 else
2284 relocs = 1;
2285 break;
2286 case 0x4:
2287 case 0x5:
2288 case 0x6:
2289 case 0x7:
2290 relocs = 1;
2291 break;
2292 default:
2293 relocs = 0;
2294 break;
2298 * Scriptify:) the opcode.
2300 *cur++ = cpu_to_scr(opcode);
2303 * If no relocation, assume 1 argument
2304 * and just scriptize:) it.
2306 if (!relocs) {
2307 *cur = cpu_to_scr(*cur);
2308 ++cur;
2309 continue;
2313 * Otherwise perform all needed relocations.
2315 while (relocs--) {
2316 old = *cur;
2318 switch (old & RELOC_MASK) {
2319 case RELOC_REGISTER:
2320 new = (old & ~RELOC_MASK) + np->mmio_ba;
2321 break;
2322 case RELOC_LABEL_A:
2323 new = (old & ~RELOC_MASK) + np->scripta_ba;
2324 break;
2325 case RELOC_LABEL_B:
2326 new = (old & ~RELOC_MASK) + np->scriptb_ba;
2327 break;
2328 case RELOC_SOFTC:
2329 new = (old & ~RELOC_MASK) + np->hcb_ba;
2330 break;
2331 case 0:
2333 * Don't relocate a 0 address.
2334 * Such addresses are mostly used for patched or
2335 * self-modifying script areas.
2337 if (old == 0) {
2338 new = old;
2339 break;
2341 /* fall through */
2342 default:
2343 new = 0;
2344 panic("sym_fw_bind_script: "
2345 "weird relocation %x\n", old);
2346 break;
2349 *cur++ = cpu_to_scr(new);
2354 /*--------------------------------------------------------------------------*/
2355 /*--------------------------- END OF FIRMWARES ----------------------------*/
2356 /*--------------------------------------------------------------------------*/
2359 * Function prototypes.
2361 static void sym_save_initial_setting (hcb_p np);
2362 static int sym_prepare_setting (hcb_p np, struct sym_nvram *nvram);
2363 static int sym_prepare_nego (hcb_p np, ccb_p cp, int nego, u_char *msgptr);
2364 static void sym_put_start_queue (hcb_p np, ccb_p cp);
2365 static void sym_chip_reset (hcb_p np);
2366 static void sym_soft_reset (hcb_p np);
2367 static void sym_start_reset (hcb_p np);
2368 static int sym_reset_scsi_bus (hcb_p np, int enab_int);
2369 static int sym_wakeup_done (hcb_p np);
2370 static void sym_flush_busy_queue (hcb_p np, int cam_status);
2371 static void sym_flush_comp_queue (hcb_p np, int cam_status);
2372 static void sym_init (hcb_p np, int reason);
2373 static int sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp,
2374 u_char *fakp);
2375 static void sym_setsync (hcb_p np, ccb_p cp, u_char ofs, u_char per,
2376 u_char div, u_char fak);
2377 static void sym_setwide (hcb_p np, ccb_p cp, u_char wide);
2378 static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
2379 u_char per, u_char wide, u_char div, u_char fak);
2380 static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
2381 u_char per, u_char wide, u_char div, u_char fak);
2382 static void sym_log_hard_error (hcb_p np, u_short sist, u_char dstat);
2383 static void sym_intr (void *arg);
2384 static void sym_poll (struct cam_sim *sim);
2385 static void sym_recover_scsi_int (hcb_p np, u_char hsts);
2386 static void sym_int_sto (hcb_p np);
2387 static void sym_int_udc (hcb_p np);
2388 static void sym_int_sbmc (hcb_p np);
2389 static void sym_int_par (hcb_p np, u_short sist);
2390 static void sym_int_ma (hcb_p np);
2391 static int sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun,
2392 int task);
2393 static void sym_sir_bad_scsi_status (hcb_p np, int num, ccb_p cp);
2394 static int sym_clear_tasks (hcb_p np, int status, int targ, int lun, int task);
2395 static void sym_sir_task_recovery (hcb_p np, int num);
2396 static int sym_evaluate_dp (hcb_p np, ccb_p cp, u32 scr, int *ofs);
2397 static void sym_modify_dp (hcb_p np, tcb_p tp, ccb_p cp, int ofs);
2398 static int sym_compute_residual (hcb_p np, ccb_p cp);
2399 static int sym_show_msg (u_char * msg);
2400 static void sym_print_msg (ccb_p cp, char *label, u_char *msg);
2401 static void sym_sync_nego (hcb_p np, tcb_p tp, ccb_p cp);
2402 static void sym_ppr_nego (hcb_p np, tcb_p tp, ccb_p cp);
2403 static void sym_wide_nego (hcb_p np, tcb_p tp, ccb_p cp);
2404 static void sym_nego_default (hcb_p np, tcb_p tp, ccb_p cp);
2405 static void sym_nego_rejected (hcb_p np, tcb_p tp, ccb_p cp);
2406 static void sym_int_sir (hcb_p np);
2407 static void sym_free_ccb (hcb_p np, ccb_p cp);
2408 static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order);
2409 static ccb_p sym_alloc_ccb (hcb_p np);
2410 static ccb_p sym_ccb_from_dsa (hcb_p np, u32 dsa);
2411 static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln);
2412 static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln);
2413 static int sym_snooptest (hcb_p np);
2414 static void sym_selectclock(hcb_p np, u_char scntl3);
2415 static void sym_getclock (hcb_p np, int mult);
2416 static int sym_getpciclock (hcb_p np);
2417 static void sym_complete_ok (hcb_p np, ccb_p cp);
2418 static void sym_complete_error (hcb_p np, ccb_p cp);
2419 static void sym_timeout (void *arg);
2420 static int sym_abort_scsiio (hcb_p np, union ccb *ccb, int timed_out);
2421 static void sym_reset_dev (hcb_p np, union ccb *ccb);
2422 static void sym_action (struct cam_sim *sim, union ccb *ccb);
2423 static void sym_action1 (struct cam_sim *sim, union ccb *ccb);
2424 static int sym_setup_cdb (hcb_p np, struct ccb_scsiio *csio, ccb_p cp);
2425 static void sym_setup_data_and_start (hcb_p np, struct ccb_scsiio *csio,
2426 ccb_p cp);
2427 #ifdef FreeBSD_Bus_Dma_Abstraction
2428 static int sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp,
2429 bus_dma_segment_t *psegs, int nsegs);
2430 #else
2431 static int sym_scatter_virtual (hcb_p np, ccb_p cp, vm_offset_t vaddr,
2432 vm_size_t len);
2433 static int sym_scatter_sg_virtual (hcb_p np, ccb_p cp,
2434 bus_dma_segment_t *psegs, int nsegs);
2435 static int sym_scatter_physical (hcb_p np, ccb_p cp, vm_offset_t paddr,
2436 vm_size_t len);
2437 #endif
2438 static int sym_scatter_sg_physical (hcb_p np, ccb_p cp,
2439 bus_dma_segment_t *psegs, int nsegs);
2440 static void sym_action2 (struct cam_sim *sim, union ccb *ccb);
2441 static void sym_update_trans (hcb_p np, tcb_p tp, struct sym_trans *tip,
2442 struct ccb_trans_settings *cts);
2443 static void sym_update_dflags(hcb_p np, u_char *flags,
2444 struct ccb_trans_settings *cts);
2446 #ifdef FreeBSD_Bus_Io_Abstraction
2447 static struct sym_pci_chip *sym_find_pci_chip (device_t dev);
2448 static int sym_pci_probe (device_t dev);
2449 static int sym_pci_attach (device_t dev);
2450 #else
2451 static struct sym_pci_chip *sym_find_pci_chip (pcici_t tag);
2452 static const char *sym_pci_probe (pcici_t tag, pcidi_t type);
2453 static void sym_pci_attach (pcici_t tag, int unit);
2454 static int sym_pci_attach2 (pcici_t tag, int unit);
2455 #endif
2457 static void sym_pci_free (hcb_p np);
2458 static int sym_cam_attach (hcb_p np);
2459 static void sym_cam_free (hcb_p np);
2461 static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram);
2462 static void sym_nvram_setup_target (hcb_p np, int targ, struct sym_nvram *nvp);
2463 static int sym_read_nvram (hcb_p np, struct sym_nvram *nvp);
2466 * Print something that allows retrieving the controller type,
2467 * unit, target and lun concerned by a kernel message.
2469 static void PRINT_TARGET (hcb_p np, int target)
2471 kprintf ("%s:%d:", sym_name(np), target);
2474 static void PRINT_LUN(hcb_p np, int target, int lun)
2476 kprintf ("%s:%d:%d:", sym_name(np), target, lun);
2479 static void PRINT_ADDR (ccb_p cp)
2481 if (cp && cp->cam_ccb)
2482 xpt_print_path(cp->cam_ccb->ccb_h.path);
2486 * Take into account this ccb in the freeze count.
2488 static void sym_freeze_cam_ccb(union ccb *ccb)
2490 if (!(ccb->ccb_h.flags & CAM_DEV_QFRZDIS)) {
2491 if (!(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
2492 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2493 xpt_freeze_devq(ccb->ccb_h.path, 1);
2499 * Set the status field of a CAM CCB.
2501 static __inline void sym_set_cam_status(union ccb *ccb, cam_status status)
2503 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2504 ccb->ccb_h.status |= status;
2508 * Get the status field of a CAM CCB.
2510 static __inline int sym_get_cam_status(union ccb *ccb)
2512 return ccb->ccb_h.status & CAM_STATUS_MASK;
2516 * Enqueue a CAM CCB.
2518 static void sym_enqueue_cam_ccb(hcb_p np, union ccb *ccb)
2520 assert(!(ccb->ccb_h.status & CAM_SIM_QUEUED));
2521 ccb->ccb_h.status = CAM_REQ_INPROG;
2523 callout_reset(&ccb->ccb_h.timeout_ch, ccb->ccb_h.timeout*hz/1000,
2524 sym_timeout, ccb);
2525 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2526 ccb->ccb_h.sym_hcb_ptr = np;
2528 sym_insque_tail(sym_qptr(&ccb->ccb_h.sim_links), &np->cam_ccbq);
2532 * Complete a pending CAM CCB.
2534 static void sym_xpt_done(hcb_p np, union ccb *ccb)
2536 if (ccb->ccb_h.status & CAM_SIM_QUEUED) {
2537 callout_stop(&ccb->ccb_h.timeout_ch);
2538 sym_remque(sym_qptr(&ccb->ccb_h.sim_links));
2539 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2540 ccb->ccb_h.sym_hcb_ptr = 0;
2542 if (ccb->ccb_h.flags & CAM_DEV_QFREEZE)
2543 sym_freeze_cam_ccb(ccb);
2544 xpt_done(ccb);
2547 static void sym_xpt_done2(hcb_p np, union ccb *ccb, int cam_status)
2549 sym_set_cam_status(ccb, cam_status);
2550 sym_xpt_done(np, ccb);
2554 * SYMBIOS chip clock divisor table.
2556 * Divisors are multiplied by 10,000,000 in order to make
2557 * calculations more simple.
2559 #define _5M 5000000
2560 static u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
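/*
 * Added note (not in the original source): the entries above encode
 * the chip divisors 1, 1.5, 2, 3, 4, 6 and 8 multiplied by 10,000,000,
 * e.g. divisor 1.5 is stored as 3*_5M = 15,000,000, which keeps the
 * period computations below in plain integer arithmetic.
 */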
2563 * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64,
2564 * 128 transfers. All chips support at least 16 transfers
2565 * bursts. The 825A, 875 and 895 chips support bursts of up
2566 * to 128 transfers and the 895A and 896 support bursts of up
2567 * to 64 transfers. All other chips support up to 16
2568 * transfers bursts.
2570 * For PCI 32 bit data transfers each transfer is a DWORD.
2571 * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers.
2573 * We use log base 2 (burst length) as internal code, with
2574 * value 0 meaning "burst disabled".
2578 * Burst length from burst code.
2580 #define burst_length(bc) (!(bc))? 0 : 1 << (bc)
2583 * Burst code from io register bits.
2585 #define burst_code(dmode, ctest4, ctest5) \
2586 (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1
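/*
 * Added example (not in the original source): a burst code of 4
 * corresponds to burst_length(4) = 1 << 4 = 16 transfers, while a
 * code of 0 disables bursting.  With CTEST4 bit 7 clear, burst_code()
 * rebuilds that code from DMODE bits 7:6 plus CTEST5 bit 2, e.g.
 * DMODE = 0xc0 and CTEST5 = 0x00 give (3 + 0 + 1) = 4, i.e.
 * 16-transfer bursts.
 */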
2589 * Set initial io register bits from burst code.
2591 static __inline void sym_init_burst(hcb_p np, u_char bc)
2593 np->rv_ctest4 &= ~0x80;
2594 np->rv_dmode &= ~(0x3 << 6);
2595 np->rv_ctest5 &= ~0x4;
2597 if (!bc) {
2598 np->rv_ctest4 |= 0x80;
2600 else {
2601 --bc;
2602 np->rv_dmode |= ((bc & 0x3) << 6);
2603 np->rv_ctest5 |= (bc & 0x4);
2609 * Print out the list of targets that have some flag disabled by user.
2611 static void sym_print_targets_flag(hcb_p np, int mask, char *msg)
2613 int cnt;
2614 int i;
2616 for (cnt = 0, i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
2617 if (i == np->myaddr)
2618 continue;
2619 if (np->target[i].usrflags & mask) {
2620 if (!cnt++)
2621 kprintf("%s: %s disabled for targets",
2622 sym_name(np), msg);
2623 kprintf(" %d", i);
2626 if (cnt)
2627 kprintf(".\n");
2631 * Save initial settings of some IO registers.
2632 * Assumed to have been set by BIOS.
2633 * We cannot reset the chip prior to reading the
2634 * IO registers, since information would be lost.
2635 * Since the SCRIPTS processor may be running, this
2636 * is not safe on paper, but it seems to work quite
2637 * well. :)
2639 static void sym_save_initial_setting (hcb_p np)
2641 np->sv_scntl0 = INB(nc_scntl0) & 0x0a;
2642 np->sv_scntl3 = INB(nc_scntl3) & 0x07;
2643 np->sv_dmode = INB(nc_dmode) & 0xce;
2644 np->sv_dcntl = INB(nc_dcntl) & 0xa8;
2645 np->sv_ctest3 = INB(nc_ctest3) & 0x01;
2646 np->sv_ctest4 = INB(nc_ctest4) & 0x80;
2647 np->sv_gpcntl = INB(nc_gpcntl);
2648 np->sv_stest1 = INB(nc_stest1);
2649 np->sv_stest2 = INB(nc_stest2) & 0x20;
2650 np->sv_stest4 = INB(nc_stest4);
2651 if (np->features & FE_C10) { /* Always large DMA fifo + ultra3 */
2652 np->sv_scntl4 = INB(nc_scntl4);
2653 np->sv_ctest5 = INB(nc_ctest5) & 0x04;
2655 else
2656 np->sv_ctest5 = INB(nc_ctest5) & 0x24;
2660 * Prepare io register values used by sym_init() according
2661 * to selected and supported features.
2663 static int sym_prepare_setting(hcb_p np, struct sym_nvram *nvram)
2665 u_char burst_max;
2666 u32 period;
2667 int i;
2670 * Wide ?
2672 np->maxwide = (np->features & FE_WIDE)? 1 : 0;
2675 * Get the frequency of the chip's clock.
2677 if (np->features & FE_QUAD)
2678 np->multiplier = 4;
2679 else if (np->features & FE_DBLR)
2680 np->multiplier = 2;
2681 else
2682 np->multiplier = 1;
2684 np->clock_khz = (np->features & FE_CLK80)? 80000 : 40000;
2685 np->clock_khz *= np->multiplier;
2687 if (np->clock_khz != 40000)
2688 sym_getclock(np, np->multiplier);
2691 * Divisor to be used for async (timer pre-scaler).
2693 i = np->clock_divn - 1;
2694 while (--i >= 0) {
2695 if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) {
2696 ++i;
2697 break;
2700 np->rv_scntl3 = i+1;
2703 * The C1010 uses hardwired divisors for async.
2704 * So, we just throw away the async divisor. :-)
2706 if (np->features & FE_C10)
2707 np->rv_scntl3 = 0;
2710 * Minimum synchronous period factor supported by the chip.
2711 * Btw, 'period' is in tenths of nanoseconds.
2713 period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz;
2714 if (period <= 250) np->minsync = 10;
2715 else if (period <= 303) np->minsync = 11;
2716 else if (period <= 500) np->minsync = 12;
2717 else np->minsync = (period + 40 - 1) / 40;
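/*
 * Added worked example (not in the original source), prior to the
 * standard-support clamping below: a 160 MHz SCSI clock gives
 * period = (40000000 + 159999) / 160000 = 250, hence minsync = 10
 * (25 ns); an 80 MHz clock gives period = 500, hence minsync = 12
 * (50 ns); a plain 40 MHz clock gives period = 1000, hence
 * minsync = 25 (100 ns).
 */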
2720 * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
2722 if (np->minsync < 25 &&
2723 !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3)))
2724 np->minsync = 25;
2725 else if (np->minsync < 12 &&
2726 !(np->features & (FE_ULTRA2|FE_ULTRA3)))
2727 np->minsync = 12;
2730 * Maximum synchronous period factor supported by the chip.
2732 period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
2733 np->maxsync = period > 2540 ? 254 : period / 10;
2736 * If chip is a C1010, guess the sync limits in DT mode.
2738 if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) {
2739 if (np->clock_khz == 160000) {
2740 np->minsync_dt = 9;
2741 np->maxsync_dt = 50;
2742 np->maxoffs_dt = 62;
2747 * 64 bit addressing (895A/896/1010) ?
2749 if (np->features & FE_DAC)
2750 #if BITS_PER_LONG > 32
2751 np->rv_ccntl1 |= (XTIMOD | EXTIBMV);
2752 #else
2753 np->rv_ccntl1 |= (DDAC);
2754 #endif
2757 * Phase mismatch handled by SCRIPTS (895A/896/1010) ?
2759 if (np->features & FE_NOPM)
2760 np->rv_ccntl0 |= (ENPMJ);
2763 * C1010 Errata.
2764 * In dual channel mode, contention occurs if internal cycles
2765 * are used. Disable internal cycles.
2767 if (np->device_id == PCI_ID_LSI53C1010 &&
2768 np->revision_id < 0x2)
2769 np->rv_ccntl0 |= DILS;
2772 * Select burst length (dwords)
2774 burst_max = SYM_SETUP_BURST_ORDER;
2775 if (burst_max == 255)
2776 burst_max = burst_code(np->sv_dmode, np->sv_ctest4,
2777 np->sv_ctest5);
2778 if (burst_max > 7)
2779 burst_max = 7;
2780 if (burst_max > np->maxburst)
2781 burst_max = np->maxburst;
2784 * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2.
2785 * This chip and the 860 Rev 1 may wrongly use PCI cache line
2786 * based transactions on LOAD/STORE instructions. So we have
2787 * to prevent these chips from using such PCI transactions in
2788 * this driver. The generic ncr driver that does not use
2789 * LOAD/STORE instructions does not need this work-around.
2791 if ((np->device_id == PCI_ID_SYM53C810 &&
2792 np->revision_id >= 0x10 && np->revision_id <= 0x11) ||
2793 (np->device_id == PCI_ID_SYM53C860 &&
2794 np->revision_id <= 0x1))
2795 np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP);
2798 * Select all supported special features.
2799 * If we are using on-board RAM for scripts, prefetch (PFEN)
2800 * does not help, but burst op fetch (BOF) does.
2801 * Disabling PFEN makes sure BOF will be used.
2803 if (np->features & FE_ERL)
2804 np->rv_dmode |= ERL; /* Enable Read Line */
2805 if (np->features & FE_BOF)
2806 np->rv_dmode |= BOF; /* Burst Opcode Fetch */
2807 if (np->features & FE_ERMP)
2808 np->rv_dmode |= ERMP; /* Enable Read Multiple */
2809 #if 1
2810 if ((np->features & FE_PFEN) && !np->ram_ba)
2811 #else
2812 if (np->features & FE_PFEN)
2813 #endif
2814 np->rv_dcntl |= PFEN; /* Prefetch Enable */
2815 if (np->features & FE_CLSE)
2816 np->rv_dcntl |= CLSE; /* Cache Line Size Enable */
2817 if (np->features & FE_WRIE)
2818 np->rv_ctest3 |= WRIE; /* Write and Invalidate */
2819 if (np->features & FE_DFS)
2820 np->rv_ctest5 |= DFS; /* Dma Fifo Size */
2823 * Select some other options.
2825 if (SYM_SETUP_PCI_PARITY)
2826 np->rv_ctest4 |= MPEE; /* Master parity checking */
2827 if (SYM_SETUP_SCSI_PARITY)
2828 np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */
2831 * Get parity checking, host ID and verbose mode from NVRAM
2833 np->myaddr = 255;
2834 sym_nvram_setup_host (np, nvram);
2837 * Get SCSI addr of host adapter (set by bios?).
2839 if (np->myaddr == 255) {
2840 np->myaddr = INB(nc_scid) & 0x07;
2841 if (!np->myaddr)
2842 np->myaddr = SYM_SETUP_HOST_ID;
2846 * Prepare initial io register bits for burst length
2848 sym_init_burst(np, burst_max);
2851 * Set SCSI BUS mode.
2852 * - LVD capable chips (895/895A/896/1010) report the
2853 * current BUS mode through the STEST4 IO register.
2854 * - For previous generation chips (825/825A/875),
2855 * user has to tell us how to check against HVD,
2856 * since a 100% safe algorithm is not possible.
2858 np->scsi_mode = SMODE_SE;
2859 if (np->features & (FE_ULTRA2|FE_ULTRA3))
2860 np->scsi_mode = (np->sv_stest4 & SMODE);
2861 else if (np->features & FE_DIFF) {
2862 if (SYM_SETUP_SCSI_DIFF == 1) {
2863 if (np->sv_scntl3) {
2864 if (np->sv_stest2 & 0x20)
2865 np->scsi_mode = SMODE_HVD;
2867 else if (nvram->type == SYM_SYMBIOS_NVRAM) {
2868 if (!(INB(nc_gpreg) & 0x08))
2869 np->scsi_mode = SMODE_HVD;
2872 else if (SYM_SETUP_SCSI_DIFF == 2)
2873 np->scsi_mode = SMODE_HVD;
2875 if (np->scsi_mode == SMODE_HVD)
2876 np->rv_stest2 |= 0x20;
2879 * Set LED support from SCRIPTS.
2880 * Ignore this feature for boards known to use a
2881 * specific GPIO wiring and for the 895A, 896
2882 * and 1010 that drive the LED directly.
2884 if ((SYM_SETUP_SCSI_LED ||
2885 (nvram->type == SYM_SYMBIOS_NVRAM ||
2886 (nvram->type == SYM_TEKRAM_NVRAM &&
2887 np->device_id == PCI_ID_SYM53C895))) &&
2888 !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01))
2889 np->features |= FE_LED0;
2892 * Set irq mode.
2894 switch(SYM_SETUP_IRQ_MODE & 3) {
2895 case 2:
2896 np->rv_dcntl |= IRQM;
2897 break;
2898 case 1:
2899 np->rv_dcntl |= (np->sv_dcntl & IRQM);
2900 break;
2901 default:
2902 break;
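/*
 * Added note (not in the original source): mode 2 forces a totem
 * pole IRQ line driver (IRQM set), mode 1 keeps whatever IRQM value
 * the BIOS left in DCNTL, and any other value leaves the default
 * open drain driver, matching the "totem pole"/"open drain" report
 * printed later in this function when sym_verbose is set.
 */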
2906 * Configure targets according to driver setup.
2907 * If NVRAM is present, get the target setup from NVRAM.
2909 for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
2910 tcb_p tp = &np->target[i];
2912 tp->tinfo.user.scsi_version = tp->tinfo.current.scsi_version= 2;
2913 tp->tinfo.user.spi_version = tp->tinfo.current.spi_version = 2;
2914 tp->tinfo.user.period = np->minsync;
2915 tp->tinfo.user.offset = np->maxoffs;
2916 tp->tinfo.user.width = np->maxwide ? BUS_16_BIT : BUS_8_BIT;
2917 tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
2918 tp->usrtags = SYM_SETUP_MAX_TAG;
2920 sym_nvram_setup_target (np, i, nvram);
2923 * For now, guess PPR/DT support from the period
2924 * and BUS width.
2926 if (np->features & FE_ULTRA3) {
2927 if (tp->tinfo.user.period <= 9 &&
2928 tp->tinfo.user.width == BUS_16_BIT) {
2929 tp->tinfo.user.options |= PPR_OPT_DT;
2930 tp->tinfo.user.offset = np->maxoffs_dt;
2931 tp->tinfo.user.spi_version = 3;
2935 if (!tp->usrtags)
2936 tp->usrflags &= ~SYM_TAGS_ENABLED;
2940 * Let user know about the settings.
2942 i = nvram->type;
2943 kprintf("%s: %s NVRAM, ID %d, Fast-%d, %s, %s\n", sym_name(np),
2944 i == SYM_SYMBIOS_NVRAM ? "Symbios" :
2945 (i == SYM_TEKRAM_NVRAM ? "Tekram" : "No"),
2946 np->myaddr,
2947 (np->features & FE_ULTRA3) ? 80 :
2948 (np->features & FE_ULTRA2) ? 40 :
2949 (np->features & FE_ULTRA) ? 20 : 10,
2950 sym_scsi_bus_mode(np->scsi_mode),
2951 (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity");
2953 * Tell him more on demand.
2955 if (sym_verbose) {
2956 kprintf("%s: %s IRQ line driver%s\n",
2957 sym_name(np),
2958 np->rv_dcntl & IRQM ? "totem pole" : "open drain",
2959 np->ram_ba ? ", using on-chip SRAM" : "");
2960 kprintf("%s: using %s firmware.\n", sym_name(np), np->fw_name);
2961 if (np->features & FE_NOPM)
2962 kprintf("%s: handling phase mismatch from SCRIPTS.\n",
2963 sym_name(np));
2966 * And still more.
2968 if (sym_verbose > 1) {
2969 kprintf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
2970 "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
2971 sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl,
2972 np->sv_ctest3, np->sv_ctest4, np->sv_ctest5);
2974 kprintf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
2975 "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
2976 sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
2977 np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
2980 * Let user be aware of targets that have some disable flags set.
2982 sym_print_targets_flag(np, SYM_SCAN_BOOT_DISABLED, "SCAN AT BOOT");
2983 if (sym_verbose)
2984 sym_print_targets_flag(np, SYM_SCAN_LUNS_DISABLED,
2985 "SCAN FOR LUNS");
2987 return 0;
2991 * Prepare the next negotiation message if needed.
2993 * Fill in the part of the message buffer that contains the
2994 * negotiation and the nego_status field of the CCB.
2995 * Returns the size of the message in bytes.
2998 static int sym_prepare_nego(hcb_p np, ccb_p cp, int nego, u_char *msgptr)
3000 tcb_p tp = &np->target[cp->target];
3001 int msglen = 0;
3004 * Early C1010 chips need a work-around for DT
3005 * data transfer to work.
3007 if (!(np->features & FE_U3EN))
3008 tp->tinfo.goal.options = 0;
3010 * negotiate using PPR ?
3012 if (tp->tinfo.goal.options & PPR_OPT_MASK)
3013 nego = NS_PPR;
3015 * negotiate wide transfers ?
3017 else if (tp->tinfo.current.width != tp->tinfo.goal.width)
3018 nego = NS_WIDE;
3020 * negotiate synchronous transfers?
3022 else if (tp->tinfo.current.period != tp->tinfo.goal.period ||
3023 tp->tinfo.current.offset != tp->tinfo.goal.offset)
3024 nego = NS_SYNC;
3026 switch (nego) {
3027 case NS_SYNC:
3028 msgptr[msglen++] = M_EXTENDED;
3029 msgptr[msglen++] = 3;
3030 msgptr[msglen++] = M_X_SYNC_REQ;
3031 msgptr[msglen++] = tp->tinfo.goal.period;
3032 msgptr[msglen++] = tp->tinfo.goal.offset;
3033 break;
3034 case NS_WIDE:
3035 msgptr[msglen++] = M_EXTENDED;
3036 msgptr[msglen++] = 2;
3037 msgptr[msglen++] = M_X_WIDE_REQ;
3038 msgptr[msglen++] = tp->tinfo.goal.width;
3039 break;
3040 case NS_PPR:
3041 msgptr[msglen++] = M_EXTENDED;
3042 msgptr[msglen++] = 6;
3043 msgptr[msglen++] = M_X_PPR_REQ;
3044 msgptr[msglen++] = tp->tinfo.goal.period;
3045 msgptr[msglen++] = 0;
3046 msgptr[msglen++] = tp->tinfo.goal.offset;
3047 msgptr[msglen++] = tp->tinfo.goal.width;
3048 msgptr[msglen++] = tp->tinfo.goal.options & PPR_OPT_DT;
3049 break;
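/*
 * Added example (not in the original source): the cases above build
 * the standard extended messages, e.g. a wide nego sends the 4 bytes
 * { M_EXTENDED, 2, M_X_WIDE_REQ, width }, a sync nego the 5 bytes
 * { M_EXTENDED, 3, M_X_SYNC_REQ, period, offset } and a PPR the
 * 8-byte variant that also carries the DT option.
 */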
3052 cp->nego_status = nego;
3054 if (nego) {
3055 tp->nego_cp = cp; /* Keep track a nego will be performed */
3056 if (DEBUG_FLAGS & DEBUG_NEGO) {
3057 sym_print_msg(cp, nego == NS_SYNC ? "sync msgout" :
3058 nego == NS_WIDE ? "wide msgout" :
3059 "ppr msgout", msgptr);
3063 return msglen;
3067 * Insert a job into the start queue.
3069 static void sym_put_start_queue(hcb_p np, ccb_p cp)
3071 u_short qidx;
3073 #ifdef SYM_CONF_IARB_SUPPORT
3075 * If the previously queued CCB is not yet done,
3076 * set the IARB hint. The SCRIPTS will go with IARB
3077 * for this job when starting the previous one.
3078 * We leave devices a chance to win arbitration by
3079 * not using more than 'iarb_max' consecutive
3080 * immediate arbitrations.
3082 if (np->last_cp && np->iarb_count < np->iarb_max) {
3083 np->last_cp->host_flags |= HF_HINT_IARB;
3084 ++np->iarb_count;
3086 else
3087 np->iarb_count = 0;
3088 np->last_cp = cp;
3089 #endif
3092 * Insert first the idle task and then our job.
3093 * The MB should ensure proper ordering.
3095 qidx = np->squeueput + 2;
3096 if (qidx >= MAX_QUEUE*2) qidx = 0;
3098 np->squeue [qidx] = cpu_to_scr(np->idletask_ba);
3099 MEMORY_BARRIER();
3100 np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba);
3102 np->squeueput = qidx;
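/*
 * Added note (not in the original source): the idle task is written
 * into the next free slot first, and the memory barrier orders that
 * store before the new job becomes visible, so the SCRIPTS processor
 * can never run past the job we just queued into a stale entry.
 */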
3104 if (DEBUG_FLAGS & DEBUG_QUEUE)
3105 kprintf ("%s: queuepos=%d.\n", sym_name (np), np->squeueput);
3108 * Script processor may be waiting for reselect.
3109 * Wake it up.
3111 MEMORY_BARRIER();
3112 OUTB (nc_istat, SIGP|np->istat_sem);
3117 * Soft reset the chip.
3119 * Raising SRST when the chip is running may cause
3120 * problems on dual function chips (see below).
3121 * On the other hand, LVD devices need some delay
3122 * to settle and report actual BUS mode in STEST4.
3124 static void sym_chip_reset (hcb_p np)
3126 OUTB (nc_istat, SRST);
3127 UDELAY (10);
3128 OUTB (nc_istat, 0);
3129 UDELAY(2000); /* For BUS MODE to settle */
3133 * Soft reset the chip.
3135 * Some 896 and 876 chip revisions may hang up if we set
3136 * the SRST (soft reset) bit at the wrong time when SCRIPTS
3137 * are running.
3138 * So, we need to abort the current operation prior to
3139 * soft resetting the chip.
3141 static void sym_soft_reset (hcb_p np)
3143 u_char istat;
3144 int i;
3146 OUTB (nc_istat, CABRT);
3147 for (i = 1000000 ; i ; --i) {
3148 istat = INB (nc_istat);
3149 if (istat & SIP) {
3150 INW (nc_sist);
3151 continue;
3153 if (istat & DIP) {
3154 OUTB (nc_istat, 0);
3155 INB (nc_dstat);
3156 break;
3159 if (!i)
3160 kprintf("%s: unable to abort current chip operation.\n",
3161 sym_name(np));
3162 sym_chip_reset (np);
3166 * Start reset process.
3168 * The interrupt handler will reinitialize the chip.
3170 static void sym_start_reset(hcb_p np)
3172 (void) sym_reset_scsi_bus(np, 1);
3175 static int sym_reset_scsi_bus(hcb_p np, int enab_int)
3177 u32 term;
3178 int retv = 0;
3180 sym_soft_reset(np); /* Soft reset the chip */
3181 if (enab_int)
3182 OUTW (nc_sien, RST);
3184 * Enable Tolerant, reset IRQD if present and
3185 * properly set IRQ mode, prior to resetting the bus.
3187 OUTB (nc_stest3, TE);
3188 OUTB (nc_dcntl, (np->rv_dcntl & IRQM));
3189 OUTB (nc_scntl1, CRST);
3190 UDELAY (200);
3192 if (!SYM_SETUP_SCSI_BUS_CHECK)
3193 goto out;
3195 * Check for no terminators or SCSI bus shorts to ground.
3196 * Read SCSI data bus, data parity bits and control signals.
3197 * We are expecting RESET to be TRUE and other signals to be
3198 * FALSE.
3200 term = INB(nc_sstat0);
3201 term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */
3202 term |= ((INB(nc_sstat2) & 0x01) << 26) | /* sdp1 */
3203 ((INW(nc_sbdl) & 0xff) << 9) | /* d7-0 */
3204 ((INW(nc_sbdl) & 0xff00) << 10) | /* d15-8 */
3205 INB(nc_sbcl); /* req ack bsy sel atn msg cd io */
3207 if (!(np->features & FE_WIDE))
3208 term &= 0x3ffff;
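/*
 * Added note (not in the original source): the only bit expected to
 * be set here is RST, mapped to 2<<7 above; any other bit indicates
 * a stuck control line, missing terminators or a data line shorted
 * to ground.
 */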
3210 if (term != (2<<7)) {
3211 kprintf("%s: suspicious SCSI data while resetting the BUS.\n",
3212 sym_name(np));
3213 kprintf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
3214 "0x%lx, expecting 0x%lx\n",
3215 sym_name(np),
3216 (np->features & FE_WIDE) ? "dp1,d15-8," : "",
3217 (u_long)term, (u_long)(2<<7));
3218 if (SYM_SETUP_SCSI_BUS_CHECK == 1)
3219 retv = 1;
3221 out:
3222 OUTB (nc_scntl1, 0);
3223 /* MDELAY(100); */
3224 return retv;
3228 * The chip may have completed jobs. Look at the DONE QUEUE.
3230 * On architectures that may reorder LOAD/STORE operations,
3231 * a memory barrier may be needed after the reading of the
3232 * so-called `flag' and prior to dealing with the data.
3234 static int sym_wakeup_done (hcb_p np)
3236 ccb_p cp;
3237 int i, n;
3238 u32 dsa;
3240 n = 0;
3241 i = np->dqueueget;
3242 while (1) {
3243 dsa = scr_to_cpu(np->dqueue[i]);
3244 if (!dsa)
3245 break;
3246 np->dqueue[i] = 0;
3247 if ((i = i+2) >= MAX_QUEUE*2)
3248 i = 0;
3250 cp = sym_ccb_from_dsa(np, dsa);
3251 if (cp) {
3252 MEMORY_BARRIER();
3253 sym_complete_ok (np, cp);
3254 ++n;
3256 else
3257 kprintf ("%s: bad DSA (%x) in done queue.\n",
3258 sym_name(np), (u_int) dsa);
3260 np->dqueueget = i;
3262 return n;
3266 * Complete all active CCBs with error.
3267 * Used on CHIP/SCSI RESET.
3269 static void sym_flush_busy_queue (hcb_p np, int cam_status)
3272 * Move all active CCBs to the COMP queue
3273 * and flush this queue.
3275 sym_que_splice(&np->busy_ccbq, &np->comp_ccbq);
3276 sym_que_init(&np->busy_ccbq);
3277 sym_flush_comp_queue(np, cam_status);
3281 * Start chip.
3283 * 'reason' means:
3284 * 0: initialisation.
3285 * 1: SCSI BUS RESET delivered or received.
3286 * 2: SCSI BUS MODE changed.
3288 static void sym_init (hcb_p np, int reason)
3290 int i;
3291 u32 phys;
3294 * Reset chip if asked, otherwise just clear fifos.
3296 if (reason == 1)
3297 sym_soft_reset(np);
3298 else {
3299 OUTB (nc_stest3, TE|CSF);
3300 OUTONB (nc_ctest3, CLF);
3304 * Clear Start Queue
3306 phys = np->squeue_ba;
3307 for (i = 0; i < MAX_QUEUE*2; i += 2) {
3308 np->squeue[i] = cpu_to_scr(np->idletask_ba);
3309 np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
3311 np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys);
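/*
 * Added note (not in the original source): the start queue is thus a
 * circular list of MAX_QUEUE slot pairs; even slots hold the DSA of a
 * job (initially the idle task) and odd slots hold the bus address of
 * the next pair, the last one wrapping back to the start.  The done
 * queue below is laid out the same way.
 */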
3314 * Start at first entry.
3316 np->squeueput = 0;
3319 * Clear Done Queue
3321 phys = np->dqueue_ba;
3322 for (i = 0; i < MAX_QUEUE*2; i += 2) {
3323 np->dqueue[i] = 0;
3324 np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
3326 np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys);
3329 * Start at first entry.
3331 np->dqueueget = 0;
3334 * Install patches in scripts.
3335 * This also points the start and done queue pointers
3336 * used from SCRIPTS to their first positions.
3338 np->fw_patch(np);
3341 * Wakeup all pending jobs.
3343 sym_flush_busy_queue(np, CAM_SCSI_BUS_RESET);
3346 * Init chip.
3348 OUTB (nc_istat, 0x00 ); /* Remove Reset, abort */
3349 UDELAY (2000); /* The 895 needs time for the bus mode to settle */
3351 OUTB (nc_scntl0, np->rv_scntl0 | 0xc0);
3352 /* full arb., ena parity, par->ATN */
3353 OUTB (nc_scntl1, 0x00); /* odd parity, and remove CRST!! */
3355 sym_selectclock(np, np->rv_scntl3); /* Select SCSI clock */
3357 OUTB (nc_scid , RRE|np->myaddr); /* Adapter SCSI address */
3358 OUTW (nc_respid, 1ul<<np->myaddr); /* Id to respond to */
3359 OUTB (nc_istat , SIGP ); /* Signal Process */
3360 OUTB (nc_dmode , np->rv_dmode); /* Burst length, dma mode */
3361 OUTB (nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */
3363 OUTB (nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */
3364 OUTB (nc_ctest3, np->rv_ctest3); /* Write and invalidate */
3365 OUTB (nc_ctest4, np->rv_ctest4); /* Master parity checking */
3367 /* Extended Sreq/Sack filtering not supported on the C10 */
3368 if (np->features & FE_C10)
3369 OUTB (nc_stest2, np->rv_stest2);
3370 else
3371 OUTB (nc_stest2, EXT|np->rv_stest2);
3373 OUTB (nc_stest3, TE); /* TolerANT enable */
3374 OUTB (nc_stime0, 0x0c); /* HTH disabled STO 0.25 sec */
3377 * For now, disable AIP generation on C1010-66.
3379 if (np->device_id == PCI_ID_LSI53C1010_2)
3380 OUTB (nc_aipcntl1, DISAIP);
3383 * C1010 Errata.
3384 * Errant SGE's when in narrow. Write bits 4 & 5 of
3385 * STEST1 register to disable SGE. We probably should do
3386 * that from SCRIPTS for each selection/reselection, but
3387 * I just don't want to. :)
3389 if (np->device_id == PCI_ID_LSI53C1010 &&
3390 /* np->revision_id < 0xff */ 1)
3391 OUTB (nc_stest1, INB(nc_stest1) | 0x30);
3394 * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
3395 * Disable overlapped arbitration for some dual function devices,
3396 * regardless of revision id (kind of post-chip-design feature. ;-))
3398 if (np->device_id == PCI_ID_SYM53C875)
3399 OUTB (nc_ctest0, (1<<5));
3400 else if (np->device_id == PCI_ID_SYM53C896)
3401 np->rv_ccntl0 |= DPR;
3404 * Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing
3405 * and/or hardware phase mismatch, since only such chips
3406 * seem to support those IO registers.
3408 if (np->features & (FE_DAC|FE_NOPM)) {
3409 OUTB (nc_ccntl0, np->rv_ccntl0);
3410 OUTB (nc_ccntl1, np->rv_ccntl1);
3414 * If phase mismatch handled by scripts (895A/896/1010),
3415 * set PM jump addresses.
3417 if (np->features & FE_NOPM) {
3418 OUTL (nc_pmjad1, SCRIPTB_BA (np, pm_handle));
3419 OUTL (nc_pmjad2, SCRIPTB_BA (np, pm_handle));
3423 * Enable GPIO0 pin for writing if LED support from SCRIPTS.
3424 * Also set GPIO5 and clear GPIO6 if hardware LED control.
3426 if (np->features & FE_LED0)
3427 OUTB(nc_gpcntl, INB(nc_gpcntl) & ~0x01);
3428 else if (np->features & FE_LEDC)
3429 OUTB(nc_gpcntl, (INB(nc_gpcntl) & ~0x41) | 0x20);
3432 * enable ints
3434 OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
3435 OUTB (nc_dien , MDPE|BF|SSI|SIR|IID);
3438 * For 895/6 enable SBMC interrupt and save current SCSI bus mode.
3439 * Try to eat the spurious SBMC interrupt that may occur when
3440 * we reset the chip but not the SCSI BUS (at initialization).
3442 if (np->features & (FE_ULTRA2|FE_ULTRA3)) {
3443 OUTONW (nc_sien, SBMC);
3444 if (reason == 0) {
3445 MDELAY(100);
3446 INW (nc_sist);
3448 np->scsi_mode = INB (nc_stest4) & SMODE;
3452 * Fill in target structure.
3453 * Reinitialize usrsync.
3454 * Reinitialize usrwide.
3455 * Prepare sync negotiation according to actual SCSI bus mode.
3457 for (i=0;i<SYM_CONF_MAX_TARGET;i++) {
3458 tcb_p tp = &np->target[i];
3460 tp->to_reset = 0;
3461 tp->head.sval = 0;
3462 tp->head.wval = np->rv_scntl3;
3463 tp->head.uval = 0;
3465 tp->tinfo.current.period = 0;
3466 tp->tinfo.current.offset = 0;
3467 tp->tinfo.current.width = BUS_8_BIT;
3468 tp->tinfo.current.options = 0;
3472 * Download SCSI SCRIPTS to on-chip RAM if present,
3473 * and start script processor.
3475 if (np->ram_ba) {
3476 if (sym_verbose > 1)
3477 kprintf ("%s: Downloading SCSI SCRIPTS.\n",
3478 sym_name(np));
3479 if (np->ram_ws == 8192) {
3480 OUTRAM_OFF(4096, np->scriptb0, np->scriptb_sz);
3481 OUTL (nc_mmws, np->scr_ram_seg);
3482 OUTL (nc_mmrs, np->scr_ram_seg);
3483 OUTL (nc_sfs, np->scr_ram_seg);
3484 phys = SCRIPTB_BA (np, start64);
3486 else
3487 phys = SCRIPTA_BA (np, init);
3488 OUTRAM_OFF(0, np->scripta0, np->scripta_sz);
3490 else
3491 phys = SCRIPTA_BA (np, init);
3493 np->istat_sem = 0;
3495 OUTL (nc_dsa, np->hcb_ba);
3496 OUTL_DSP (phys);
3499 * Notify the XPT about the RESET condition.
3501 if (reason != 0)
3502 xpt_async(AC_BUS_RESET, np->path, NULL);
3506 * Get clock factor and sync divisor for a given
3507 * synchronous factor period.
3509 static int
3510 sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, u_char *fakp)
3512 u32 clk = np->clock_khz; /* SCSI clock frequency in kHz */
3513 int div = np->clock_divn; /* Number of divisors supported */
3514 u32 fak; /* Sync factor in sxfer */
3515 u32 per; /* Period in tenths of ns */
3516 u32 kpc; /* (per * clk) */
3517 int ret;
3520 * Compute the synchronous period in tenths of nanoseconds
3522 if (dt && sfac <= 9) per = 125;
3523 else if (sfac <= 10) per = 250;
3524 else if (sfac == 11) per = 303;
3525 else if (sfac == 12) per = 500;
3526 else per = 40 * sfac;
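/*
 * Added example (not in the original source): an ST factor of 12
 * yields per = 500 (50 ns) and a DT factor of 9 yields per = 125
 * (12.5 ns); factors above 12 simply map to 4 ns steps
 * (per = 40 * sfac tenths of ns).
 */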
3527 ret = per;
3529 kpc = per * clk;
3530 if (dt)
3531 kpc <<= 1;
3534 * For the earliest C10 revision (0), we cannot use extra
3535 * clocks to set up the SCSI clocking.
3536 * Note that this limits the lowest sync data transfer
3537 * to 5 Mega-transfers per second and may result in
3538 * using higher clock divisors.
3540 #if 1
3541 if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) {
3543 * Look for the lowest clock divisor that allows an
3544 * output speed not faster than the period.
3546 while (div > 0) {
3547 --div;
3548 if (kpc > (div_10M[div] << 2)) {
3549 ++div;
3550 break;
3553 fak = 0; /* No extra clocks */
3554 if (div == np->clock_divn) { /* Are we too fast ? */
3555 ret = -1;
3557 *divp = div;
3558 *fakp = fak;
3559 return ret;
3561 #endif
3564 * Look for the greatest clock divisor that allows an
3565 * input speed faster than the period.
3567 while (div-- > 0)
3568 if (kpc >= (div_10M[div] << 2)) break;
3571 * Calculate the lowest clock factor that allows an output
3572 * speed not faster than the period, and the max output speed.
3573 * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT.
3574 * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT.
3576 if (dt) {
3577 fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2;
3578 /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */
3580 else {
3581 fak = (kpc - 1) / div_10M[div] + 1 - 4;
3582 /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */
3586 * Check against our hardware limits, or bugs :).
3588 if (fak < 0) {fak = 0; ret = -1;}
3589 if (fak > 2) {fak = 2; ret = -1;}
3592 * Compute and return sync parameters.
3594 *divp = div;
3595 *fakp = fak;
3597 return ret;
3601 * Tell the SCSI layer about the new transfer parameters.
3603 static void
3604 sym_xpt_async_transfer_neg(hcb_p np, int target, u_int spi_valid)
3606 struct ccb_trans_settings cts;
3607 struct cam_path *path;
3608 int sts;
3609 tcb_p tp = &np->target[target];
3611 sts = xpt_create_path(&path, NULL, cam_sim_path(np->sim), target,
3612 CAM_LUN_WILDCARD);
3613 if (sts != CAM_REQ_CMP)
3614 return;
3616 bzero(&cts, sizeof(cts));
3618 #define cts__scsi (cts.proto_specific.scsi)
3619 #define cts__spi (cts.xport_specific.spi)
3621 cts.type = CTS_TYPE_CURRENT_SETTINGS;
3622 cts.protocol = PROTO_SCSI;
3623 cts.transport = XPORT_SPI;
3624 cts.protocol_version = tp->tinfo.current.scsi_version;
3625 cts.transport_version = tp->tinfo.current.spi_version;
3627 cts__spi.valid = spi_valid;
3628 if (spi_valid & CTS_SPI_VALID_SYNC_RATE)
3629 cts__spi.sync_period = tp->tinfo.current.period;
3630 if (spi_valid & CTS_SPI_VALID_SYNC_OFFSET)
3631 cts__spi.sync_offset = tp->tinfo.current.offset;
3632 if (spi_valid & CTS_SPI_VALID_BUS_WIDTH)
3633 cts__spi.bus_width = tp->tinfo.current.width;
3634 if (spi_valid & CTS_SPI_VALID_PPR_OPTIONS)
3635 cts__spi.ppr_options = tp->tinfo.current.options;
3636 #undef cts__spi
3637 #undef cts__scsi
3638 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
3639 xpt_async(AC_TRANSFER_NEG, path, &cts);
3640 xpt_free_path(path);
3643 #define SYM_SPI_VALID_WDTR \
3644 CTS_SPI_VALID_BUS_WIDTH | \
3645 CTS_SPI_VALID_SYNC_RATE | \
3646 CTS_SPI_VALID_SYNC_OFFSET
3647 #define SYM_SPI_VALID_SDTR \
3648 CTS_SPI_VALID_SYNC_RATE | \
3649 CTS_SPI_VALID_SYNC_OFFSET
3650 #define SYM_SPI_VALID_PPR \
3651 CTS_SPI_VALID_PPR_OPTIONS | \
3652 CTS_SPI_VALID_BUS_WIDTH | \
3653 CTS_SPI_VALID_SYNC_RATE | \
3654 CTS_SPI_VALID_SYNC_OFFSET
3657 * We received a WDTR.
3658 * Let everything be aware of the changes.
3660 static void sym_setwide(hcb_p np, ccb_p cp, u_char wide)
3662 tcb_p tp = &np->target[cp->target];
3664 sym_settrans(np, cp, 0, 0, 0, wide, 0, 0);
3667 * Tell the SCSI layer about the new transfer parameters.
3669 tp->tinfo.goal.width = tp->tinfo.current.width = wide;
3670 tp->tinfo.current.offset = 0;
3671 tp->tinfo.current.period = 0;
3672 tp->tinfo.current.options = 0;
3674 sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_WDTR);
3678 * We received a SDTR.
3679 * Let everything be aware of the changes.
3681 static void
3682 sym_setsync(hcb_p np, ccb_p cp, u_char ofs, u_char per, u_char div, u_char fak)
3684 tcb_p tp = &np->target[cp->target];
3685 u_char wide = (cp->phys.select.sel_scntl3 & EWS) ? 1 : 0;
3687 sym_settrans(np, cp, 0, ofs, per, wide, div, fak);
3690 * Tell the SCSI layer about the new transfer parameters.
3692 tp->tinfo.goal.period = tp->tinfo.current.period = per;
3693 tp->tinfo.goal.offset = tp->tinfo.current.offset = ofs;
3694 tp->tinfo.goal.options = tp->tinfo.current.options = 0;
3696 sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_SDTR);
3700 * We received a PPR.
3701 * Let everything be aware of the changes.
3703 static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
3704 u_char per, u_char wide, u_char div, u_char fak)
3706 tcb_p tp = &np->target[cp->target];
3708 sym_settrans(np, cp, dt, ofs, per, wide, div, fak);
3711 * Tell the SCSI layer about the new transfer parameters.
3713 tp->tinfo.goal.width = tp->tinfo.current.width = wide;
3714 tp->tinfo.goal.period = tp->tinfo.current.period = per;
3715 tp->tinfo.goal.offset = tp->tinfo.current.offset = ofs;
3716 tp->tinfo.goal.options = tp->tinfo.current.options = dt;
3718 sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_PPR);
3722 * Switch trans mode for the current job and its target.
3724 static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs,
3725 u_char per, u_char wide, u_char div, u_char fak)
3727 SYM_QUEHEAD *qp;
3728 union ccb *ccb;
3729 tcb_p tp;
3730 u_char target = INB (nc_sdid) & 0x0f;
3731 u_char sval, wval, uval;
3733 assert (cp);
3734 if (!cp) return;
3735 ccb = cp->cam_ccb;
3736 assert (ccb);
3737 if (!ccb) return;
3738 assert (target == (cp->target & 0xf));
3739 tp = &np->target[target];
3741 sval = tp->head.sval;
3742 wval = tp->head.wval;
3743 uval = tp->head.uval;
3745 #if 0
3746 kprintf("XXXX sval=%x wval=%x uval=%x (%x)\n",
3747 sval, wval, uval, np->rv_scntl3);
3748 #endif
3750 * Set the offset.
3752 if (!(np->features & FE_C10))
3753 sval = (sval & ~0x1f) | ofs;
3754 else
3755 sval = (sval & ~0x3f) | ofs;
3758 * Set the sync divisor and extra clock factor.
3760 if (ofs != 0) {
3761 wval = (wval & ~0x70) | ((div+1) << 4);
3762 if (!(np->features & FE_C10))
3763 sval = (sval & ~0xe0) | (fak << 5);
3764 else {
3765 uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT);
3766 if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT);
3767 if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT);
3772 * Set the bus width.
3774 wval = wval & ~EWS;
3775 if (wide != 0)
3776 wval |= EWS;
3779 * Set misc. ultra enable bits.
3781 if (np->features & FE_C10) {
3782 uval = uval & ~(U3EN|AIPCKEN);
3783 if (dt) {
3784 assert(np->features & FE_U3EN);
3785 uval |= U3EN;
3788 else {
3789 wval = wval & ~ULTRA;
3790 if (per <= 12) wval |= ULTRA;
3794 * Stop there if sync parameters are unchanged.
3796 if (tp->head.sval == sval &&
3797 tp->head.wval == wval &&
3798 tp->head.uval == uval)
3799 return;
3800 tp->head.sval = sval;
3801 tp->head.wval = wval;
3802 tp->head.uval = uval;
3805 * Disable extended Sreq/Sack filtering if per < 50.
3806 * Not supported on the C1010.
3808 if (per < 50 && !(np->features & FE_C10))
3809 OUTOFFB (nc_stest2, EXT);
3812 * set actual value and sync_status
3814 OUTB (nc_sxfer, tp->head.sval);
3815 OUTB (nc_scntl3, tp->head.wval);
3817 if (np->features & FE_C10) {
3818 OUTB (nc_scntl4, tp->head.uval);
3822 * patch ALL busy ccbs of this target.
3824 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
3825 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
3826 if (cp->target != target)
3827 continue;
3828 cp->phys.select.sel_scntl3 = tp->head.wval;
3829 cp->phys.select.sel_sxfer = tp->head.sval;
3830 if (np->features & FE_C10) {
3831 cp->phys.select.sel_scntl4 = tp->head.uval;
3837 * log message for real hard errors
3839 * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc).
3840 * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf.
3842 * exception register:
3843 * ds: dstat
3844 * si: sist
3846 * SCSI bus lines:
3847 * so: control lines as driven by chip.
3848 * si: control lines as seen by chip.
3849 * sd: scsi data lines as seen by chip.
3851 * wide/fastmode:
3852 * sxfer: (see the manual)
3853 * scntl3: (see the manual)
3855 * current script command:
3856 * dsp: script address (relative to start of script).
3857 * dbc: first word of script command.
3859 * First 24 registers of the chip:
3860 * r0..rf
3862 static void sym_log_hard_error(hcb_p np, u_short sist, u_char dstat)
3864 u32 dsp;
3865 int script_ofs;
3866 int script_size;
3867 char *script_name;
3868 u_char *script_base;
3869 int i;
3871 dsp = INL (nc_dsp);
3873 if (dsp > np->scripta_ba &&
3874 dsp <= np->scripta_ba + np->scripta_sz) {
3875 script_ofs = dsp - np->scripta_ba;
3876 script_size = np->scripta_sz;
3877 script_base = (u_char *) np->scripta0;
3878 script_name = "scripta";
3880 else if (np->scriptb_ba < dsp &&
3881 dsp <= np->scriptb_ba + np->scriptb_sz) {
3882 script_ofs = dsp - np->scriptb_ba;
3883 script_size = np->scriptb_sz;
3884 script_base = (u_char *) np->scriptb0;
3885 script_name = "scriptb";
3886 } else {
3887 script_ofs = dsp;
3888 script_size = 0;
3889 script_base = 0;
3890 script_name = "mem";
3893 kprintf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n",
3894 sym_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist,
3895 (unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl),
3896 (unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer),
3897 (unsigned)INB (nc_scntl3), script_name, script_ofs,
3898 (unsigned)INL (nc_dbc));
3900 if (((script_ofs & 3) == 0) &&
3901 (unsigned)script_ofs < script_size) {
3902 kprintf ("%s: script cmd = %08x\n", sym_name(np),
3903 scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
3906 kprintf ("%s: regdump:", sym_name(np));
3907 for (i=0; i<24;i++)
3908 kprintf (" %02x", (unsigned)INB_OFF(i));
3909 kprintf (".\n");
3912 * PCI BUS error, read the PCI status register.
3914 if (dstat & (MDPE|BF)) {
3915 u_short pci_sts;
3916 #ifdef FreeBSD_Bus_Io_Abstraction
3917 pci_sts = pci_read_config(np->device, PCIR_STATUS, 2);
3918 #else
3919 pci_sts = pci_cfgread(np->pci_tag, PCIR_STATUS, 2);
3920 #endif
3921 if (pci_sts & 0xf900) {
3922 #ifdef FreeBSD_Bus_Io_Abstraction
3923 pci_write_config(np->device, PCIR_STATUS, pci_sts, 2);
3924 #else
3925 pci_cfgwrite(np->pci_tag, PCIR_STATUS, pci_sts, 2);
3926 #endif
3927 kprintf("%s: PCI STATUS = 0x%04x\n",
3928 sym_name(np), pci_sts & 0xf900);
3934 * chip interrupt handler
3936 * In normal situations, interrupt conditions occur one at
3937 * a time. But when something bad happens on the SCSI BUS,
3938 * the chip may raise several interrupt flags before
3939 * stopping and interrupting the CPU. The additional
3940 * interrupt flags are stacked in some extra registers
3941 * after the SIP and/or DIP flag has been raised in the
3942 * ISTAT. After the CPU has read the interrupt condition
3943 * flag from SIST or DSTAT, the chip unstacks the other
3944 * interrupt flags and sets the corresponding bits in
3945 * SIST or DSTAT. Since the chip starts stacking once the
3946 * SIP or DIP flag is set, there is a small window of time
3947 * where the stacking does not occur.
3949 * Typically, multiple interrupt conditions may happen in
3950 * the following situations:
3952 * - SCSI parity error + Phase mismatch (PAR|MA)
3953 * When a parity error is detected in an input phase
3954 * and the device switches to msg-in phase inside a
3955 * block MOV.
3956 * - SCSI parity error + Unexpected disconnect (PAR|UDC)
3957 * When a stupid device does not want to handle the
3958 * recovery of a SCSI parity error.
3959 * - Some combinations of STO, PAR, UDC, ...
3960 * When using non-compliant SCSI stuff, when the user is
3961 * doing non-compliant hot tampering on the BUS, when
3962 * something really bad happens to a device, etc ...
3964 * The heuristic suggested by SYMBIOS to handle
3965 * multiple interrupts is to try unstacking all
3966 * interrupt conditions and to handle them by
3967 * priority based on error severity.
3968 * This will work when the unstacking has been
3969 * successful, but we cannot be 100 % sure of that,
3970 * since the CPU may have been faster to unstack than
3971 * the chip is able to stack. Hmmm ... But it seems that
3972 * such a situation is very unlikely to happen.
3974 * If this happens, for example an STO caught by the CPU
3975 * and then a UDC happening before the CPU has restarted
3976 * the SCRIPTS, the driver may wrongly complete the
3977 * same command on UDC, since the SCRIPTS didn't restart
3978 * and the DSA still points to the same command.
3979 * We avoid this situation by setting the DSA to an
3980 * invalid value when the CCB is completed and before
3981 * restarting the SCRIPTS.
3983 * Another issue is that we need some sections of our
3984 * recovery procedures to be somehow uninterruptible but
3985 * the SCRIPTS processor does not provide such a
3986 * feature. For this reason, we preferably handle recovery
3987 * from the C code and check against some SCRIPTS critical
3988 * sections from the C code.
3990 * Hopefully, the interrupt handling of the driver is now
3991 * able to resist weird BUS error conditions, but do not
3992 * ask me for any guarantee that it will never fail. :-)
3993 * Use at your own decision and risk.
3996 static void sym_intr1 (hcb_p np)
3998 u_char istat, istatc;
3999 u_char dstat;
4000 u_short sist;
4003 * interrupt on the fly ?
4005 * A `dummy read' is needed to ensure that the
4006 * clear of the INTF flag reaches the device
4007 * before the scanning of the DONE queue.
4009 istat = INB (nc_istat);
4010 if (istat & INTF) {
4011 OUTB (nc_istat, (istat & SIGP) | INTF | np->istat_sem);
4012 istat = INB (nc_istat); /* DUMMY READ */
4013 if (DEBUG_FLAGS & DEBUG_TINY) kprintf ("F ");
4014 (void)sym_wakeup_done (np);
4017 if (!(istat & (SIP|DIP)))
4018 return;
4020 #if 0 /* We should never get this one */
4021 if (istat & CABRT)
4022 OUTB (nc_istat, CABRT);
4023 #endif
4026 * PAR and MA interrupts may occur at the same time,
4027 * and we need to know of both in order to handle
4028 * this situation properly. We try to unstack SCSI
4029 * interrupts for that reason. BTW, I dislike a LOT
4030 * such a loop inside the interrupt routine.
4031 * Even if DMA interrupt stacking is very unlikely to
4032 * happen, we also try unstacking these ones, since
4033 * this has no performance impact.
4035 sist = 0;
4036 dstat = 0;
4037 istatc = istat;
4038 do {
4039 if (istatc & SIP)
4040 sist |= INW (nc_sist);
4041 if (istatc & DIP)
4042 dstat |= INB (nc_dstat);
4043 istatc = INB (nc_istat);
4044 istat |= istatc;
4045 } while (istatc & (SIP|DIP));
4047 if (DEBUG_FLAGS & DEBUG_TINY)
4048 kprintf ("<%d|%x:%x|%x:%x>",
4049 (int)INB(nc_scr0),
4050 dstat,sist,
4051 (unsigned)INL(nc_dsp),
4052 (unsigned)INL(nc_dbc));
4054 * On paper, a memory barrier may be needed here.
4055 * And since we are paranoid ... :)
4057 MEMORY_BARRIER();
4060 * First, interrupts we want to service cleanly.
4062 * Phase mismatch (MA) is the most frequent interrupt
4063 * for chips earlier than the 896, so we have to service
4064 * it as quickly as possible.
4065 * A SCSI parity error (PAR) may be combined with a phase
4066 * mismatch condition (MA).
4067 * Programmed interrupts (SIR) are used to call the C code
4068 * from SCRIPTS.
4069 * The single step interrupt (SSI) is not used in this
4070 * driver.
4072 if (!(sist & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
4073 !(dstat & (MDPE|BF|ABRT|IID))) {
4074 if (sist & PAR) sym_int_par (np, sist);
4075 else if (sist & MA) sym_int_ma (np);
4076 else if (dstat & SIR) sym_int_sir (np);
4077 else if (dstat & SSI) OUTONB_STD ();
4078 else goto unknown_int;
4079 return;
4083 * Now, interrupts that do not happen in normal
4084 * situations and that we may need to recover from.
4086 * On SCSI RESET (RST), we reset everything.
4087 * On SCSI BUS MODE CHANGE (SBMC), we complete all
4088 * active CCBs with RESET status, prepare all devices
4089 * for negotiating again and restart the SCRIPTS.
4090 * On STO and UDC, we complete the CCB with the corres-
4091 * ponding status and restart the SCRIPTS.
4093 if (sist & RST) {
4094 xpt_print_path(np->path);
4095 kprintf("SCSI BUS reset detected.\n");
4096 sym_init (np, 1);
4097 return;
4100 OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */
4101 OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */
4103 if (!(sist & (GEN|HTH|SGE)) &&
4104 !(dstat & (MDPE|BF|ABRT|IID))) {
4105 if (sist & SBMC) sym_int_sbmc (np);
4106 else if (sist & STO) sym_int_sto (np);
4107 else if (sist & UDC) sym_int_udc (np);
4108 else goto unknown_int;
4109 return;
4113 * Now, interrupts we are not able to recover cleanly.
4115 * Log message for hard errors.
4116 * Reset everything.
4119 sym_log_hard_error(np, sist, dstat);
4121 if ((sist & (GEN|HTH|SGE)) ||
4122 (dstat & (MDPE|BF|ABRT|IID))) {
4123 sym_start_reset(np);
4124 return;
4127 unknown_int:
4129 * We just miss the cause of the interrupt. :(
4130 * Print a message. The timeout will do the real work.
4132 kprintf( "%s: unknown interrupt(s) ignored, "
4133 "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
4134 sym_name(np), istat, dstat, sist);
4137 static void sym_intr(void *arg)
4139 if (DEBUG_FLAGS & DEBUG_TINY) kprintf ("[");
4140 sym_intr1((hcb_p) arg);
4141 if (DEBUG_FLAGS & DEBUG_TINY) kprintf ("]");
4142 return;
4145 static void sym_poll(struct cam_sim *sim)
4147 crit_enter();
4148 sym_intr(cam_sim_softc(sim));
4149 crit_exit();
4154 * generic recovery from scsi interrupt
4156 * The doc says that when the chip gets a SCSI interrupt,
4157 * it tries to stop in an orderly fashion, by completing
4158 * an instruction fetch that had started or by flushing
4159 * the DMA fifo for a write to memory that was executing.
4160 * Stopping in such a fashion is not enough to know whether
4161 * the instruction just before the current DSP value has been
4162 * executed or not.
4164 * There are some small SCRIPTS sections that deal with
4165 * the start queue and the done queue that may break any
4166 * assumption from the C code if we are interrupted
4167 * inside, so we reset if this happens. Btw, since these
4168 * SCRIPTS sections are executed while the SCRIPTS hasn't
4169 * started SCSI operations, it is very unlikely to happen.
4171 * All the driver data structures are supposed to be
4172 * allocated from the same 4 GB memory window, so there
4173 * is a 1 to 1 relationship between DSA and driver data
4174 * structures. Since we are careful :) to invalidate the
4175 * DSA when we complete a command or when the SCRIPTS
4176 * pushes a DSA into a queue, we can trust it when it
4177 * points to a CCB.
4179 static void sym_recover_scsi_int (hcb_p np, u_char hsts)
4181 u32 dsp = INL (nc_dsp);
4182 u32 dsa = INL (nc_dsa);
4183 ccb_p cp = sym_ccb_from_dsa(np, dsa);
4186 * If we haven't been interrupted inside the SCRIPTS
4187 * critical paths, we can safely restart the SCRIPTS
4188 * and trust the DSA value if it matches a CCB.
4190 if ((!(dsp > SCRIPTA_BA (np, getjob_begin) &&
4191 dsp < SCRIPTA_BA (np, getjob_end) + 1)) &&
4192 (!(dsp > SCRIPTA_BA (np, ungetjob) &&
4193 dsp < SCRIPTA_BA (np, reselect) + 1)) &&
4194 (!(dsp > SCRIPTB_BA (np, sel_for_abort) &&
4195 dsp < SCRIPTB_BA (np, sel_for_abort_1) + 1)) &&
4196 (!(dsp > SCRIPTA_BA (np, done) &&
4197 dsp < SCRIPTA_BA (np, done_end) + 1))) {
4198 OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */
4199 OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */
4201 * If we have a CCB, let the SCRIPTS call us back for
4202 * the handling of the error with SCRATCHA filled with
4203 * STARTPOS. This way, we will be able to freeze the
4204 * device queue and requeue awaiting IOs.
4206 if (cp) {
4207 cp->host_status = hsts;
4208 OUTL_DSP (SCRIPTA_BA (np, complete_error));
4211 * Otherwise just restart the SCRIPTS.
4213 else {
4214 OUTL (nc_dsa, 0xffffff);
4215 OUTL_DSP (SCRIPTA_BA (np, start));
4218 else
4219 goto reset_all;
4221 return;
4223 reset_all:
4224 sym_start_reset(np);
4228 * chip exception handler for selection timeout
4230 void sym_int_sto (hcb_p np)
4232 u32 dsp = INL (nc_dsp);
4234 if (DEBUG_FLAGS & DEBUG_TINY) kprintf ("T");
4236 if (dsp == SCRIPTA_BA (np, wf_sel_done) + 8)
4237 sym_recover_scsi_int(np, HS_SEL_TIMEOUT);
4238 else
4239 sym_start_reset(np);
4243 * chip exception handler for unexpected disconnect
4245 void sym_int_udc (hcb_p np)
4247 kprintf ("%s: unexpected disconnect\n", sym_name(np));
4248 sym_recover_scsi_int(np, HS_UNEXPECTED);
4252 * chip exception handler for SCSI bus mode change
4254 * spi2-r12 11.2.3 says a transceiver mode change must
4255 * generate a reset event and a device that detects a reset
4256 * event shall initiate a hard reset. It says also that a
4257 * device that detects a mode change shall set data transfer
4258 * mode to eight bit asynchronous, etc...
4259 * So, just reinitializing all except chip should be enough.
4261 static void sym_int_sbmc (hcb_p np)
4263 u_char scsi_mode = INB (nc_stest4) & SMODE;
4266 * Notify user.
4268 xpt_print_path(np->path);
4269 kprintf("SCSI BUS mode change from %s to %s.\n",
4270 sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode));
4273 * Should suspend command processing for a few seconds and
4274 * reinitialize all except the chip.
4276 sym_init (np, 2);
4280 * chip exception handler for SCSI parity error.
4282 * When the chip detects a SCSI parity error and is
4283 * currently executing a (CH)MOV instruction, it does
4284 * not interrupt immediately, but tries to finish the
4285 * transfer of the current scatter entry before
4286 * interrupting. The following situations may occur:
4288 * - The complete scatter entry has been transferred
4289 * without the device having changed phase.
4290 * The chip will then interrupt with the DSP pointing
4291 * to the instruction that follows the MOV.
4293 * - A phase mismatch occurs before the MOV finished
4294 * and phase errors are to be handled by the C code.
4295 * The chip will then interrupt with both PAR and MA
4296 * conditions set.
4298 * - A phase mismatch occurs before the MOV finished and
4299 * phase errors are to be handled by SCRIPTS.
4300 * The chip will load the DSP with the phase mismatch
4301 * JUMP address and interrupt the host processor.
4303 static void sym_int_par (hcb_p np, u_short sist)
4305 u_char hsts = INB (HS_PRT);
4306 u32 dsp = INL (nc_dsp);
4307 u32 dbc = INL (nc_dbc);
4308 u32 dsa = INL (nc_dsa);
4309 u_char sbcl = INB (nc_sbcl);
4310 u_char cmd = dbc >> 24;
4311 int phase = cmd & 7;
4312 ccb_p cp = sym_ccb_from_dsa(np, dsa);
4314 kprintf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
4315 sym_name(np), hsts, dbc, sbcl);
4318 * Check that the chip is connected to the SCSI BUS.
4320 if (!(INB (nc_scntl1) & ISCON)) {
4321 sym_recover_scsi_int(np, HS_UNEXPECTED);
4322 return;
4326 * If the nexus is not clearly identified, reset the bus.
4327 * We will try to do better later.
4329 if (!cp)
4330 goto reset_all;
4333 * Check instruction was a MOV, direction was INPUT and
4334 * ATN is asserted.
4336 if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
4337 goto reset_all;
4340 * Keep track of the parity error.
4342 OUTONB (HF_PRT, HF_EXT_ERR);
4343 cp->xerr_status |= XE_PARITY_ERR;
4346 * Prepare the message to send to the device.
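* (Phase 7 is MSG IN, for which the one-byte MESSAGE PARITY
* ERROR message applies; the other input phases get
* INITIATOR DETECTED ERROR instead.)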
4348 np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;
4351 * If the old phase was DATA IN phase, we have to deal with
4352 * the 3 situations described above.
4353 * For other input phases (MSG IN and STATUS), the device
4354 * must resend the whole thing that failed parity checking
4355 * or signal error. So, jumping to dispatcher should be OK.
4357 if (phase == 1 || phase == 5) {
4358 /* Phase mismatch handled by SCRIPTS */
4359 if (dsp == SCRIPTB_BA (np, pm_handle))
4360 OUTL_DSP (dsp);
4361 /* Phase mismatch handled by the C code */
4362 else if (sist & MA)
4363 sym_int_ma (np);
4364 /* No phase mismatch occurred */
4365 else {
4366 OUTL (nc_temp, dsp);
4367 OUTL_DSP (SCRIPTA_BA (np, dispatch));
4370 else
4371 OUTL_DSP (SCRIPTA_BA (np, clrack));
4372 return;
4374 reset_all:
4375 sym_start_reset(np);
4376 return;
4380 * chip exception handler for phase errors.
4382 * We have to construct a new transfer descriptor,
4383 * to transfer the rest of the current block.
4385 static void sym_int_ma (hcb_p np)
4387 u32 dbc;
4388 u32 rest;
4389 u32 dsp;
4390 u32 dsa;
4391 u32 nxtdsp;
4392 u32 *vdsp;
4393 u32 oadr, olen;
4394 u32 *tblp;
4395 u32 newcmd;
4396 u_int delta;
4397 u_char cmd;
4398 u_char hflags, hflags0;
4399 struct sym_pmc *pm;
4400 ccb_p cp;
4402 dsp = INL (nc_dsp);
4403 dbc = INL (nc_dbc);
4404 dsa = INL (nc_dsa);
4406 cmd = dbc >> 24;
4407 rest = dbc & 0xffffff;
4408 delta = 0;
4411 * locate matching cp if any.
4413 cp = sym_ccb_from_dsa(np, dsa);
4416 * Do not take into account the dma fifo and various buffers in
4417 * INPUT phase since the chip flushes everything before
4418 * raising the MA interrupt for interrupted INPUT phases.
4419 * For DATA IN phase, we will check for the SWIDE later.
4421 if ((cmd & 7) != 1 && (cmd & 7) != 5) {
4422 u_char ss0, ss2;
4424 if (np->features & FE_DFBC)
4425 delta = INW (nc_dfbc);
4426 else {
4427 u32 dfifo;
4430 * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership.
4432 dfifo = INL(nc_dfifo);
4435 * Calculate remaining bytes in DMA fifo.
4436 * (CTEST5 = dfifo >> 16)
4438 if (dfifo & (DFS << 16))
4439 delta = ((((dfifo >> 8) & 0x300) |
4440 (dfifo & 0xff)) - rest) & 0x3ff;
4441 else
4442 delta = ((dfifo & 0xff) - rest) & 0x7f;
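/*
 * Worked example (illustrative numbers only): with the large
 * fifo enabled (DFS set in CTEST5), the fifo byte count is the
 * 10-bit value {CTEST5[1:0], DFIFO[7:0]}.  If that count reads
 * 0x01a while the DBC residual 'rest' is 0x014, then
 * delta = (0x01a - 0x014) & 0x3ff = 6 bytes are still held in
 * the DMA fifo and get added to the residual below.
 */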
4446 * The data in the dma fifo has not been transferred to
4447 * the target -> add the amount to the rest
4448 * and clear the data.
4449 * Check the sstat2 register in case of wide transfer.
4451 rest += delta;
4452 ss0 = INB (nc_sstat0);
4453 if (ss0 & OLF) rest++;
4454 if (!(np->features & FE_C10))
4455 if (ss0 & ORF) rest++;
4456 if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
4457 ss2 = INB (nc_sstat2);
4458 if (ss2 & OLF1) rest++;
4459 if (!(np->features & FE_C10))
4460 if (ss2 & ORF1) rest++;
4464 * Clear fifos.
4466 OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* dma fifo */
4467 OUTB (nc_stest3, TE|CSF); /* scsi fifo */
4471 * log the information
4473 if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
4474 kprintf ("P%x%x RL=%d D=%d ", cmd&7, INB(nc_sbcl)&7,
4475 (unsigned) rest, (unsigned) delta);
4478 * try to find the interrupted script command,
4479 * and the address at which to continue.
4481 vdsp = 0;
4482 nxtdsp = 0;
4483 if (dsp > np->scripta_ba &&
4484 dsp <= np->scripta_ba + np->scripta_sz) {
4485 vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8));
4486 nxtdsp = dsp;
4488 else if (dsp > np->scriptb_ba &&
4489 dsp <= np->scriptb_ba + np->scriptb_sz) {
4490 vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8));
4491 nxtdsp = dsp;
4495 * log the information
4497 if (DEBUG_FLAGS & DEBUG_PHASE) {
4498 kprintf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
4499 cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd);
4502 if (!vdsp) {
4503 kprintf ("%s: interrupted SCRIPT address not found.\n",
4504 sym_name (np));
4505 goto reset_all;
4508 if (!cp) {
4509 kprintf ("%s: SCSI phase error fixup: CCB already dequeued.\n",
4510 sym_name (np));
4511 goto reset_all;
4515 * get old start address and old length.
4517 oadr = scr_to_cpu(vdsp[1]);
4519 if (cmd & 0x10) { /* Table indirect */
4520 tblp = (u32 *) ((char*) &cp->phys + oadr);
4521 olen = scr_to_cpu(tblp[0]);
4522 oadr = scr_to_cpu(tblp[1]);
4523 } else {
4524 tblp = (u32 *) 0;
4525 olen = scr_to_cpu(vdsp[0]) & 0xffffff;
4528 if (DEBUG_FLAGS & DEBUG_PHASE) {
4529 kprintf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
4530 (unsigned) (scr_to_cpu(vdsp[0]) >> 24),
4531 tblp,
4532 (unsigned) olen,
4533 (unsigned) oadr);
4537 * check cmd against assumed interrupted script command.
4538 * In a DT data phase, the MOVE instruction does not carry
4539 * bit 4 of the phase.
4541 if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) {
4542 PRINT_ADDR(cp);
4543 kprintf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
4544 (unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24);
4546 goto reset_all;
4550 * if the old phase was not a data phase, leave here.
4552 if (cmd & 2) {
4553 PRINT_ADDR(cp);
4554 kprintf ("phase change %x-%x %d@%08x resid=%d.\n",
4555 cmd&7, INB(nc_sbcl)&7, (unsigned)olen,
4556 (unsigned)oadr, (unsigned)rest);
4557 goto unexpected_phase;
4561 * Choose the correct PM save area.
4563 * Look at the PM_SAVE SCRIPT if you want to understand
4564 * this stuff. The equivalent code is implemented in
4565 * SCRIPTS for the 895A, 896 and 1010 that are able to
4566 * handle PM from the SCRIPTS processor.
4568 hflags0 = INB (HF_PRT);
4569 hflags = hflags0;
4571 if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
4572 if (hflags & HF_IN_PM0)
4573 nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
4574 else if (hflags & HF_IN_PM1)
4575 nxtdsp = scr_to_cpu(cp->phys.pm1.ret);
4577 if (hflags & HF_DP_SAVED)
4578 hflags ^= HF_ACT_PM;
4581 if (!(hflags & HF_ACT_PM)) {
4582 pm = &cp->phys.pm0;
4583 newcmd = SCRIPTA_BA (np, pm0_data);
4585 else {
4586 pm = &cp->phys.pm1;
4587 newcmd = SCRIPTA_BA (np, pm1_data);
4590 hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
4591 if (hflags != hflags0)
4592 OUTB (HF_PRT, hflags);
4595 * fill in the phase mismatch context
4597 pm->sg.addr = cpu_to_scr(oadr + olen - rest);
4598 pm->sg.size = cpu_to_scr(rest);
4599 pm->ret = cpu_to_scr(nxtdsp);
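/*
 * Illustrative example: if the interrupted MOVE described
 * olen = 0x1000 bytes at oadr = 0x00200000 and rest = 0x200
 * bytes were left to transfer, the context above becomes
 * sg.addr = 0x00200e00, sg.size = 0x200, i.e. exactly the
 * tail of the scatter entry that still has to be moved.
 */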
4602 * If we have a SWIDE,
4603 * - prepare the address to write the SWIDE from SCRIPTS,
4604 * - compute the SCRIPTS address to restart from,
4605 * - move current data pointer context by one byte.
4607 nxtdsp = SCRIPTA_BA (np, dispatch);
4608 if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) &&
4609 (INB (nc_scntl2) & WSR)) {
4610 u32 tmp;
4613 * Set up the table indirect for the MOVE
4614 * of the residual byte and adjust the data
4615 * pointer context.
4617 tmp = scr_to_cpu(pm->sg.addr);
4618 cp->phys.wresid.addr = cpu_to_scr(tmp);
4619 pm->sg.addr = cpu_to_scr(tmp + 1);
4620 tmp = scr_to_cpu(pm->sg.size);
4621 cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
4622 pm->sg.size = cpu_to_scr(tmp - 1);
4625 * If only the residual byte is to be moved,
4626 * no PM context is needed.
4628 if ((tmp&0xffffff) == 1)
4629 newcmd = pm->ret;
4632 * Prepare the address of SCRIPTS that will
4633 * move the residual byte to memory.
4635 nxtdsp = SCRIPTB_BA (np, wsr_ma_helper);
4638 if (DEBUG_FLAGS & DEBUG_PHASE) {
4639 PRINT_ADDR(cp);
4640 kprintf ("PM %x %x %x / %x %x %x.\n",
4641 hflags0, hflags, newcmd,
4642 (unsigned)scr_to_cpu(pm->sg.addr),
4643 (unsigned)scr_to_cpu(pm->sg.size),
4644 (unsigned)scr_to_cpu(pm->ret));
4648 * Restart the SCRIPTS processor.
4650 OUTL (nc_temp, newcmd);
4651 OUTL_DSP (nxtdsp);
4652 return;
4655 * Unexpected phase changes that occur when the current phase
4656 * is not a DATA IN or DATA OUT phase are due to error conditions.
4657 * Such events may only happen when the SCRIPTS is using a
4658 * multibyte SCSI MOVE.
4660 * Phase change Some possible cause
4662 * COMMAND --> MSG IN SCSI parity error detected by target.
4663 * COMMAND --> STATUS Bad command or refused by target.
4664 * MSG OUT --> MSG IN Message rejected by target.
4665 * MSG OUT --> COMMAND Bogus target that discards extended
4666 * negotiation messages.
4668 * The code below does not care about the new phase and so
4669 * trusts the target. Why annoy it?
4670 * If the interrupted phase is COMMAND phase, we restart at
4671 * dispatcher.
4672 * If a target does not get all the messages after selection,
4673 * the code assumes blindly that the target discards extended
4674 * messages and clears the negotiation status.
4675 * If the target does not want all our response to negotiation,
4676 * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
4677 * bloat for such a should_not_happen situation).
4678 * In all other situations, we reset the BUS.
4679 * Are these assumptions reasonable? (Wait and see ...)
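*
* As an illustration of the MSG OUT handling below: a target
* that changes phase while we are sending our PPR response is
* caught by the dsp == SCRIPTB_BA (np, send_ppr) test and
* restarted at nego_bad_phase, which leads to the
* SIR_NEGO_PROTO fallback instead of a bus reset.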
4681 unexpected_phase:
4682 dsp -= 8;
4683 nxtdsp = 0;
4685 switch (cmd & 7) {
4686 case 2: /* COMMAND phase */
4687 nxtdsp = SCRIPTA_BA (np, dispatch);
4688 break;
4689 #if 0
4690 case 3: /* STATUS phase */
4691 nxtdsp = SCRIPTA_BA (np, dispatch);
4692 break;
4693 #endif
4694 case 6: /* MSG OUT phase */
4696 * If the device may want to use untagged when we want
4697 * tagged, we prepare an IDENTIFY without disc. granted,
4698 * since we will not be able to handle reselect.
4699 * Otherwise, we just don't care.
4701 if (dsp == SCRIPTA_BA (np, send_ident)) {
4702 if (cp->tag != NO_TAG && olen - rest <= 3) {
4703 cp->host_status = HS_BUSY;
4704 np->msgout[0] = M_IDENTIFY | cp->lun;
4705 nxtdsp = SCRIPTB_BA (np, ident_break_atn);
4707 else
4708 nxtdsp = SCRIPTB_BA (np, ident_break);
4710 else if (dsp == SCRIPTB_BA (np, send_wdtr) ||
4711 dsp == SCRIPTB_BA (np, send_sdtr) ||
4712 dsp == SCRIPTB_BA (np, send_ppr)) {
4713 nxtdsp = SCRIPTB_BA (np, nego_bad_phase);
4715 break;
4716 #if 0
4717 case 7: /* MSG IN phase */
4718 nxtdsp = SCRIPTA_BA (np, clrack);
4719 break;
4720 #endif
4723 if (nxtdsp) {
4724 OUTL_DSP (nxtdsp);
4725 return;
4728 reset_all:
4729 sym_start_reset(np);
4733 * Dequeue from the START queue all CCBs that match
4734 * a given target/lun/task condition (-1 means all),
4735 * and move them from the BUSY queue to the COMP queue
4736 * with CAM_REQUEUE_REQ status condition.
4737 * This function is used during error handling/recovery.
4738 * It is called with SCRIPTS not running.
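*
* Illustrative behaviour: if the start queue holds jobs for
* (target 0, lun 0), (target 2, lun 0) and (target 0, lun 1)
* in that order and we are called with target=0, lun=-1,
* task=-1, both target 0 jobs end up on the COMP queue with
* CAM_REQUEUE_REQ, the remaining job is compacted toward the
* head of the start queue, and the function returns 2.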
4740 static int
4741 sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, int task)
4743 int j;
4744 ccb_p cp;
4747 * Make sure the starting index is within range.
4749 assert((i >= 0) && (i < 2*MAX_QUEUE));
4752 * Walk until end of START queue and dequeue every job
4753 * that matches the target/lun/task condition.
4755 j = i;
4756 while (i != np->squeueput) {
4757 cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i]));
4758 assert(cp);
4759 #ifdef SYM_CONF_IARB_SUPPORT
4760 /* Forget hints for IARB, they may be no longer relevant */
4761 cp->host_flags &= ~HF_HINT_IARB;
4762 #endif
4763 if ((target == -1 || cp->target == target) &&
4764 (lun == -1 || cp->lun == lun) &&
4765 (task == -1 || cp->tag == task)) {
4766 sym_set_cam_status(cp->cam_ccb, CAM_REQUEUE_REQ);
4767 sym_remque(&cp->link_ccbq);
4768 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
4770 else {
4771 if (i != j)
4772 np->squeue[j] = np->squeue[i];
4773 if ((j += 2) >= MAX_QUEUE*2) j = 0;
4775 if ((i += 2) >= MAX_QUEUE*2) i = 0;
4777 if (i != j) /* Copy back the idle task if needed */
4778 np->squeue[j] = np->squeue[i];
4779 np->squeueput = j; /* Update our current start queue pointer */
4781 return (i - j) / 2;
4785 * Complete all CCBs queued to the COMP queue.
4787 * These CCBs are assumed:
4788 * - Not to be referenced either by devices or
4789 * SCRIPTS-related queues and data.
4790 * - To have to be completed with an error condition
4791 * or requeued.
4793 * The device queue freeze count is incremented
4794 * for each CCB that does not prevent this.
4795 * This function is called when all CCBs involved
4796 * in error handling/recovery have been reaped.
4798 static void
4799 sym_flush_comp_queue(hcb_p np, int cam_status)
4801 SYM_QUEHEAD *qp;
4802 ccb_p cp;
4804 while ((qp = sym_remque_head(&np->comp_ccbq)) != 0) {
4805 union ccb *ccb;
4806 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
4807 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
4808 /* Leave quiet CCBs waiting for resources */
4809 if (cp->host_status == HS_WAIT)
4810 continue;
4811 ccb = cp->cam_ccb;
4812 if (cam_status)
4813 sym_set_cam_status(ccb, cam_status);
4814 sym_free_ccb(np, cp);
4815 sym_freeze_cam_ccb(ccb);
4816 sym_xpt_done(np, ccb);
4821 * chip handler for bad SCSI status condition
4823 * In case of bad SCSI status, we unqueue all the tasks
4824 * currently queued to the controller but not yet started
4825 * and then restart the SCRIPTS processor immediately.
4827 * QUEUE FULL and BUSY conditions are handled the same way.
4828 * Basically all the not yet started tasks are requeued in
4829 * device queue and the queue is frozen until a completion.
4831 * For CHECK CONDITION and COMMAND TERMINATED status, we use
4832 * the CCB of the failed command to prepare a REQUEST SENSE
4833 * SCSI command and queue it to the controller queue.
4835 * SCRATCHA is assumed to have been loaded with STARTPOS
4836 * before the SCRIPTS called the C code.
4838 static void sym_sir_bad_scsi_status(hcb_p np, int num, ccb_p cp)
4840 tcb_p tp = &np->target[cp->target];
4841 u32 startp;
4842 u_char s_status = cp->ssss_status;
4843 u_char h_flags = cp->host_flags;
4844 int msglen;
4845 int nego;
4846 int i;
4849 * Compute the index of the next job to start from SCRIPTS.
4851 i = (INL (nc_scratcha) - np->squeue_ba) / 4;
4854 * The last CCB queued used for IARB hint may be
4855 * no longer relevant. Forget it.
4857 #ifdef SYM_CONF_IARB_SUPPORT
4858 if (np->last_cp)
4859 np->last_cp = 0;
4860 #endif
4863 * Now deal with the SCSI status.
4865 switch(s_status) {
4866 case S_BUSY:
4867 case S_QUEUE_FULL:
4868 if (sym_verbose >= 2) {
4869 PRINT_ADDR(cp);
4870 kprintf (s_status == S_BUSY ? "BUSY\n" : "QUEUE FULL\n");
4872 default: /* S_INT, S_INT_COND_MET, S_CONFLICT */
4873 sym_complete_error (np, cp);
4874 break;
4875 case S_TERMINATED:
4876 case S_CHECK_COND:
4878 * If we get an SCSI error when requesting sense, give up.
4880 if (h_flags & HF_SENSE) {
4881 sym_complete_error (np, cp);
4882 break;
4886 * Dequeue all queued CCBs for that device not yet started,
4887 * and restart the SCRIPTS processor immediately.
4889 (void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
4890 OUTL_DSP (SCRIPTA_BA (np, start));
4893 * Save some info of the actual IO.
4894 * Compute the data residual.
4896 cp->sv_scsi_status = cp->ssss_status;
4897 cp->sv_xerr_status = cp->xerr_status;
4898 cp->sv_resid = sym_compute_residual(np, cp);
4901 * Prepare all needed data structures for
4902 * requesting sense data.
4906 * identify message
4908 cp->scsi_smsg2[0] = M_IDENTIFY | cp->lun;
4909 msglen = 1;
4912 * If we are currently using anything different from
4913 * async. 8 bit data transfers with that target,
4914 * start a negotiation, since the device may want
4915 * to report a UNIT ATTENTION condition to us due to
4916 * a cause we currently ignore, and we do not want
4917 * to be stuck with WIDE and/or SYNC data transfer.
4919 * cp->nego_status is filled by sym_prepare_nego().
4921 cp->nego_status = 0;
4922 nego = 0;
4923 if (tp->tinfo.current.options & PPR_OPT_MASK)
4924 nego = NS_PPR;
4925 else if (tp->tinfo.current.width != BUS_8_BIT)
4926 nego = NS_WIDE;
4927 else if (tp->tinfo.current.offset != 0)
4928 nego = NS_SYNC;
4929 if (nego)
4930 msglen +=
4931 sym_prepare_nego (np,cp, nego, &cp->scsi_smsg2[msglen]);
4933 * Message table indirect structure.
4935 cp->phys.smsg.addr = cpu_to_scr(CCB_BA (cp, scsi_smsg2));
4936 cp->phys.smsg.size = cpu_to_scr(msglen);
4939 * sense command
4941 cp->phys.cmd.addr = cpu_to_scr(CCB_BA (cp, sensecmd));
4942 cp->phys.cmd.size = cpu_to_scr(6);
4945 * patch requested size into sense command
4947 cp->sensecmd[0] = 0x03;
4948 cp->sensecmd[1] = cp->lun << 5;
4949 if (tp->tinfo.current.scsi_version > 2 || cp->lun > 7)
4950 cp->sensecmd[1] = 0;
4951 cp->sensecmd[4] = SYM_SNS_BBUF_LEN;
4952 cp->data_len = SYM_SNS_BBUF_LEN;
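/*
 * The bytes patched above yield a REQUEST SENSE CDB:
 * byte 0 = 0x03 (opcode), byte 1 = lun << 5 (cleared for
 * SCSI-3 devices and LUNs above 7, where the LUN is carried
 * by the IDENTIFY message only), byte 4 = SYM_SNS_BBUF_LEN
 * (allocation length).
 */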
4955 * sense data
4957 bzero(cp->sns_bbuf, SYM_SNS_BBUF_LEN);
4958 cp->phys.sense.addr = cpu_to_scr(vtobus(cp->sns_bbuf));
4959 cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN);
4962 * requeue the command.
4964 startp = SCRIPTB_BA (np, sdata_in);
4966 cp->phys.head.savep = cpu_to_scr(startp);
4967 cp->phys.head.goalp = cpu_to_scr(startp + 16);
4968 cp->phys.head.lastp = cpu_to_scr(startp);
4969 cp->startp = cpu_to_scr(startp);
4971 cp->actualquirks = SYM_QUIRK_AUTOSAVE;
4972 cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
4973 cp->ssss_status = S_ILLEGAL;
4974 cp->host_flags = (HF_SENSE|HF_DATA_IN);
4975 cp->xerr_status = 0;
4976 cp->extra_bytes = 0;
4978 cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, select));
4981 * Requeue the command.
4983 sym_put_start_queue(np, cp);
4986 * Give back to upper layer everything we have dequeued.
4988 sym_flush_comp_queue(np, 0);
4989 break;
4994 * After a device has accepted some management message
4995 * as BUS DEVICE RESET, ABORT TASK, etc ..., or when
4996 * a device signals a UNIT ATTENTION condition, some
4997 * tasks are thrown away by the device. We are required
4998 * to reflect that on our tasks list since the device
4999 * will never complete these tasks.
5001 * This function moves from the BUSY queue to the COMP
5002 * queue all disconnected CCBs for a given target that
5003 * match the following criteria:
5004 * - lun=-1 means any logical UNIT otherwise a given one.
5005 * - task=-1 means any task, otherwise a given one.
5007 static int
5008 sym_clear_tasks(hcb_p np, int cam_status, int target, int lun, int task)
5010 SYM_QUEHEAD qtmp, *qp;
5011 int i = 0;
5012 ccb_p cp;
5015 * Move the entire BUSY queue to our temporary queue.
5017 sym_que_init(&qtmp);
5018 sym_que_splice(&np->busy_ccbq, &qtmp);
5019 sym_que_init(&np->busy_ccbq);
5022 * Put all CCBs that match our criteria into
5023 * the COMP queue and put back other ones into
5024 * the BUSY queue.
5026 while ((qp = sym_remque_head(&qtmp)) != 0) {
5027 union ccb *ccb;
5028 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
5029 ccb = cp->cam_ccb;
5030 if (cp->host_status != HS_DISCONNECT ||
5031 cp->target != target ||
5032 (lun != -1 && cp->lun != lun) ||
5033 (task != -1 &&
5034 (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) {
5035 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
5036 continue;
5038 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
5040 /* Preserve the software timeout condition */
5041 if (sym_get_cam_status(ccb) != CAM_CMD_TIMEOUT)
5042 sym_set_cam_status(ccb, cam_status);
5043 ++i;
5044 #if 0
5045 kprintf("XXXX TASK @%p CLEARED\n", cp);
5046 #endif
5048 return i;
5052 * chip handler for TASKS recovery
5054 * We cannot safely abort a command while the SCRIPTS
5055 * processor is running, since we would just be racing
5056 * with it.
5058 * As long as we have tasks to abort, we keep the SEM
5059 * bit set in the ISTAT. When this bit is set, the
5060 * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED)
5061 * each time it enters the scheduler.
5063 * If we have to reset a target, clear tasks of a unit,
5064 * or to perform the abort of a disconnected job, we
5065 * restart the SCRIPTS for selecting the target. Once
5066 * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED).
5067 * If it loses arbitration, the SCRIPTS will interrupt again
5068 * the next time it enters its scheduler, and so on ...
5070 * On SIR_TARGET_SELECTED, we scan for the most
5071 * appropriate thing to do:
5073 * - If nothing, we just send an M_ABORT message to the
5074 * target to get rid of the useless SCSI bus ownership.
5075 * According to the specs, no tasks shall be affected.
5076 * - If the target is to be reset, we send it an M_RESET
5077 * message.
5078 * - If a logical UNIT is to be cleared, we send the
5079 * IDENTIFY(lun) + M_ABORT.
5080 * - If an untagged task is to be aborted, we send the
5081 * IDENTIFY(lun) + M_ABORT.
5082 * - If a tagged task is to be aborted, we send the
5083 * IDENTIFY(lun) + task attributes + M_ABORT_TAG.
5085 * Once our 'kiss of death' :) message has been accepted
5086 * by the target, the SCRIPTS interrupts again
5087 * (SIR_ABORT_SENT). On this interrupt, we complete
5088 * all the CCBs that should have been aborted by the
5089 * target according to our message.
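*
* For instance (illustrative values), aborting the tagged task
* that used tag 5 on LUN 2 sends the 4-byte sequence
* { M_IDENTIFY | 2, tag message byte used at select time
* (e.g. SIMPLE QUEUE TAG), 5, M_ABORT_TAG }, while clearing a
* whole logical unit sends just { M_IDENTIFY | lun, M_ABORT }.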
5091 static void sym_sir_task_recovery(hcb_p np, int num)
5093 SYM_QUEHEAD *qp;
5094 ccb_p cp;
5095 tcb_p tp;
5096 int target=-1, lun=-1, task;
5097 int i, k;
5099 switch(num) {
5101 * The SCRIPTS processor stopped before starting
5102 * the next command in order to allow us to perform
5103 * some task recovery.
5105 case SIR_SCRIPT_STOPPED:
5107 * Do we have any target to reset or unit to clear ?
5109 for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
5110 tp = &np->target[i];
5111 if (tp->to_reset ||
5112 (tp->lun0p && tp->lun0p->to_clear)) {
5113 target = i;
5114 break;
5116 if (!tp->lunmp)
5117 continue;
5118 for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
5119 if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
5120 target = i;
5121 break;
5124 if (target != -1)
5125 break;
5129 * If not, walk the busy queue for any
5130 * disconnected CCB to be aborted.
5132 if (target == -1) {
5133 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
5134 cp = sym_que_entry(qp,struct sym_ccb,link_ccbq);
5135 if (cp->host_status != HS_DISCONNECT)
5136 continue;
5137 if (cp->to_abort) {
5138 target = cp->target;
5139 break;
5145 * If some target is to be selected,
5146 * prepare and start the selection.
5148 if (target != -1) {
5149 tp = &np->target[target];
5150 np->abrt_sel.sel_id = target;
5151 np->abrt_sel.sel_scntl3 = tp->head.wval;
5152 np->abrt_sel.sel_sxfer = tp->head.sval;
5153 OUTL(nc_dsa, np->hcb_ba);
5154 OUTL_DSP (SCRIPTB_BA (np, sel_for_abort));
5155 return;
5159 * Now look for a CCB to abort that hasn't started yet.
5160 * Btw, the SCRIPTS processor is still stopped, so
5161 * we are not in a race.
5163 i = 0;
5164 cp = 0;
5165 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
5166 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
5167 if (cp->host_status != HS_BUSY &&
5168 cp->host_status != HS_NEGOTIATE)
5169 continue;
5170 if (!cp->to_abort)
5171 continue;
5172 #ifdef SYM_CONF_IARB_SUPPORT
5174 * If we are using IMMEDIATE ARBITRATION, we do not
5175 * want to cancel the last queued CCB, since the
5176 * SCRIPTS may have anticipated the selection.
5178 if (cp == np->last_cp) {
5179 cp->to_abort = 0;
5180 continue;
5182 #endif
5183 i = 1; /* Means we have found some */
5184 break;
5186 if (!i) {
5188 * We are done, so we do not need
5189 * to synchronize with the SCRIPTS any longer.
5190 * Remove the SEM flag from the ISTAT.
5192 np->istat_sem = 0;
5193 OUTB (nc_istat, SIGP);
5194 break;
5197 * Compute index of next position in the start
5198 * queue the SCRIPTS intends to start and dequeue
5199 * all CCBs for that device that haven't been started.
5201 i = (INL (nc_scratcha) - np->squeue_ba) / 4;
5202 i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
5205 * Make sure at least our IO to abort has been dequeued.
5207 assert(i && sym_get_cam_status(cp->cam_ccb) == CAM_REQUEUE_REQ);
5210 * Keep track in cam status of the reason of the abort.
5212 if (cp->to_abort == 2)
5213 sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT);
5214 else
5215 sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED);
5218 * Complete with error everything that we have dequeued.
5220 sym_flush_comp_queue(np, 0);
5221 break;
5223 * The SCRIPTS processor has selected a target
5224 * for which we may have some manual recovery to perform.
5226 case SIR_TARGET_SELECTED:
5227 target = (INB (nc_sdid) & 0xf);
5228 tp = &np->target[target];
5230 np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg));
5233 * If the target is to be reset, prepare a
5234 * M_RESET message and clear the to_reset flag
5235 * since we do not expect this operation to fail.
5237 if (tp->to_reset) {
5238 np->abrt_msg[0] = M_RESET;
5239 np->abrt_tbl.size = 1;
5240 tp->to_reset = 0;
5241 break;
5245 * Otherwise, look for some logical unit to be cleared.
5247 if (tp->lun0p && tp->lun0p->to_clear)
5248 lun = 0;
5249 else if (tp->lunmp) {
5250 for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
5251 if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
5252 lun = k;
5253 break;
5259 * If a logical unit is to be cleared, prepare
5260 * an IDENTIFY(lun) + ABORT MESSAGE.
5262 if (lun != -1) {
5263 lcb_p lp = sym_lp(np, tp, lun);
5264 lp->to_clear = 0; /* We do not expect to fail here */
5265 np->abrt_msg[0] = M_IDENTIFY | lun;
5266 np->abrt_msg[1] = M_ABORT;
5267 np->abrt_tbl.size = 2;
5268 break;
5272 * Otherwise, look for some disconnected job to
5273 * abort for this target.
5275 i = 0;
5276 cp = 0;
5277 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
5278 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
5279 if (cp->host_status != HS_DISCONNECT)
5280 continue;
5281 if (cp->target != target)
5282 continue;
5283 if (!cp->to_abort)
5284 continue;
5285 i = 1; /* Means we have some */
5286 break;
5290 * If we have none, probably since the device has
5291 * completed the command before we won arbitration,
5292 * send an M_ABORT message without IDENTIFY.
5293 * According to the specs, the device must just
5294 * disconnect the BUS and not abort any task.
5296 if (!i) {
5297 np->abrt_msg[0] = M_ABORT;
5298 np->abrt_tbl.size = 1;
5299 break;
5303 * We have some task to abort.
5304 * Set the IDENTIFY(lun)
5306 np->abrt_msg[0] = M_IDENTIFY | cp->lun;
5309 * If we want to abort an untagged command, we
5310 * will send an IDENTIFY + M_ABORT.
5311 * Otherwise (tagged command), we will send
5312 * an IDENTIFY + task attributes + ABORT TAG.
5314 if (cp->tag == NO_TAG) {
5315 np->abrt_msg[1] = M_ABORT;
5316 np->abrt_tbl.size = 2;
5318 else {
5319 np->abrt_msg[1] = cp->scsi_smsg[1];
5320 np->abrt_msg[2] = cp->scsi_smsg[2];
5321 np->abrt_msg[3] = M_ABORT_TAG;
5322 np->abrt_tbl.size = 4;
5325 * Keep track of software timeout condition, since the
5326 * peripheral driver may not count retries on abort
5327 * conditions not due to timeout.
5329 if (cp->to_abort == 2)
5330 sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT);
5331 cp->to_abort = 0; /* We do not expect to fail here */
5332 break;
5335 * The target has accepted our message and switched
5336 * to BUS FREE phase as we expected.
5338 case SIR_ABORT_SENT:
5339 target = (INB (nc_sdid) & 0xf);
5340 tp = &np->target[target];
5343 * If we didn't abort anything, leave here.
5345 if (np->abrt_msg[0] == M_ABORT)
5346 break;
5349 * If we sent a M_RESET, then a hardware reset has
5350 * been performed by the target.
5351 * - Reset everything to async 8 bit
5352 * - Tell ourself to negotiate next time :-)
5353 * - Prepare to clear all disconnected CCBs for
5354 * this target from our task list (lun=task=-1)
5356 lun = -1;
5357 task = -1;
5358 if (np->abrt_msg[0] == M_RESET) {
5359 tp->head.sval = 0;
5360 tp->head.wval = np->rv_scntl3;
5361 tp->head.uval = 0;
5362 tp->tinfo.current.period = 0;
5363 tp->tinfo.current.offset = 0;
5364 tp->tinfo.current.width = BUS_8_BIT;
5365 tp->tinfo.current.options = 0;
5369 * Otherwise, check for the LUN and TASK(s)
5370 * concerned by the cancellation.
5371 * If it is not ABORT_TAG then it is CLEAR_QUEUE
5372 * or an ABORT message :-)
5374 else {
5375 lun = np->abrt_msg[0] & 0x3f;
5376 if (np->abrt_msg[1] == M_ABORT_TAG)
5377 task = np->abrt_msg[2];
5381 * Complete all the CCBs the device should have
5382 * aborted due to our 'kiss of death' message.
5384 i = (INL (nc_scratcha) - np->squeue_ba) / 4;
5385 (void) sym_dequeue_from_squeue(np, i, target, lun, -1);
5386 (void) sym_clear_tasks(np, CAM_REQ_ABORTED, target, lun, task);
5387 sym_flush_comp_queue(np, 0);
5390 * If we sent a BDR, make the upper layer aware of that.
5392 if (np->abrt_msg[0] == M_RESET)
5393 xpt_async(AC_SENT_BDR, np->path, NULL);
5394 break;
5398 * Print to the log the message we intend to send.
5400 if (num == SIR_TARGET_SELECTED) {
5401 PRINT_TARGET(np, target);
5402 sym_printl_hex("control msgout:", np->abrt_msg,
5403 np->abrt_tbl.size);
5404 np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
5408 * Let the SCRIPTS processor continue.
5410 OUTONB_STD ();
5414 * Gerard's alchemy:) that deals with the data
5415 * pointer for both MDP and the residual calculation.
5417 * I didn't want to bloat the code by more than 200
5418 * lines for the handling of both MDP and the residual.
5419 * This has been achieved by using a data pointer
5420 * representation consisting of an index into the data
5421 * array (dp_sg) and a negative offset (dp_ofs) that
5422 * have the following meaning:
5424 * - dp_sg = SYM_CONF_MAX_SG
5425 * we are at the end of the data script.
5426 * - dp_sg < SYM_CONF_MAX_SG
5427 * dp_sg points to the next entry of the scatter array
5428 * we want to transfer.
5429 * - dp_ofs < 0
5430 * dp_ofs represents the residual bytes of the
5431 * previous scatter entry that we will send first.
5432 * - dp_ofs = 0
5433 * no residual to send first.
5435 * The function sym_evaluate_dp() accepts an arbitrary
5436 * offset (basically from the MDP message) and returns
5437 * the corresponding values of dp_sg and dp_ofs.
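*
* A small worked example (illustrative numbers): take a command
* that uses the last two scatter entries, each 0x1000 bytes
* long.  A data pointer sitting 0x400 bytes into the second of
* them is represented as dp_sg = SYM_CONF_MAX_SG (no further
* entry to start) and dp_ofs = -0xc00 (the last 0xc00 bytes of
* the previous entry still have to be sent first).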
5440 static int sym_evaluate_dp(hcb_p np, ccb_p cp, u32 scr, int *ofs)
5442 u32 dp_scr;
5443 int dp_ofs, dp_sg, dp_sgmin;
5444 int tmp;
5445 struct sym_pmc *pm;
5448 * Compute the resulting data pointer in terms of a script
5449 * address within some DATA script and a signed byte offset.
5451 dp_scr = scr;
5452 dp_ofs = *ofs;
5453 if (dp_scr == SCRIPTA_BA (np, pm0_data))
5454 pm = &cp->phys.pm0;
5455 else if (dp_scr == SCRIPTA_BA (np, pm1_data))
5456 pm = &cp->phys.pm1;
5457 else
5458 pm = 0;
5460 if (pm) {
5461 dp_scr = scr_to_cpu(pm->ret);
5462 dp_ofs -= scr_to_cpu(pm->sg.size);
5466 * If we are auto-sensing, then we are done.
5468 if (cp->host_flags & HF_SENSE) {
5469 *ofs = dp_ofs;
5470 return 0;
5474 * Deduce the index of the sg entry.
5475 * Keep track of the index of the first valid entry.
5476 * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the
5477 * end of the data.
5479 tmp = scr_to_cpu(cp->phys.head.goalp);
5480 dp_sg = SYM_CONF_MAX_SG;
5481 if (dp_scr != tmp)
5482 dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4);
5483 dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
5486 * Move to the sg entry the data pointer belongs to.
5488 * If we are inside the data area, we expect result to be:
5490 * Either,
5491 * dp_ofs = 0 and dp_sg is the index of the sg entry
5492 * the data pointer belongs to (or the end of the data)
5493 * Or,
5494 * dp_ofs < 0 and dp_sg is the index of the sg entry
5495 * the data pointer belongs to + 1.
5497 if (dp_ofs < 0) {
5498 int n;
5499 while (dp_sg > dp_sgmin) {
5500 --dp_sg;
5501 tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
5502 n = dp_ofs + (tmp & 0xffffff);
5503 if (n > 0) {
5504 ++dp_sg;
5505 break;
5507 dp_ofs = n;
5510 else if (dp_ofs > 0) {
5511 while (dp_sg < SYM_CONF_MAX_SG) {
5512 tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
5513 dp_ofs -= (tmp & 0xffffff);
5514 ++dp_sg;
5515 if (dp_ofs <= 0)
5516 break;
5521 * Make sure the data pointer is inside the data area.
5522 * If not, return some error.
5524 if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0))
5525 goto out_err;
5526 else if (dp_sg > SYM_CONF_MAX_SG ||
5527 (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0))
5528 goto out_err;
5531 * Save the extreme pointer if needed.
5533 if (dp_sg > cp->ext_sg ||
5534 (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) {
5535 cp->ext_sg = dp_sg;
5536 cp->ext_ofs = dp_ofs;
5540 * Return data.
5542 *ofs = dp_ofs;
5543 return dp_sg;
5545 out_err:
5546 return -1;
5550 * chip handler for MODIFY DATA POINTER MESSAGE
5552 * We also call this function on IGNORE WIDE RESIDUE
5553 * messages that do not match a SWIDE full condition.
5554 * Btw, we assume in that situation that such a message
5555 * is equivalent to a MODIFY DATA POINTER (offset=-1).
5558 static void sym_modify_dp(hcb_p np, tcb_p tp, ccb_p cp, int ofs)
5560 int dp_ofs = ofs;
5561 u32 dp_scr = INL (nc_temp);
5562 u32 dp_ret;
5563 u32 tmp;
5564 u_char hflags;
5565 int dp_sg;
5566 struct sym_pmc *pm;
5569 * Not supported for auto-sense.
5571 if (cp->host_flags & HF_SENSE)
5572 goto out_reject;
5575 * Apply our alchemy:) (see comments in sym_evaluate_dp())
5576 * to the resulting data pointer.
5578 dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs);
5579 if (dp_sg < 0)
5580 goto out_reject;
5583 * And our alchemy:) lets us easily calculate the data
5584 * script address we want to return for the next data phase.
5586 dp_ret = cpu_to_scr(cp->phys.head.goalp);
5587 dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4);
5590 * If offset / scatter entry is zero we do not need
5591 * a context for the new current data pointer.
5593 if (dp_ofs == 0) {
5594 dp_scr = dp_ret;
5595 goto out_ok;
5599 * Get a context for the new current data pointer.
5601 hflags = INB (HF_PRT);
5603 if (hflags & HF_DP_SAVED)
5604 hflags ^= HF_ACT_PM;
5606 if (!(hflags & HF_ACT_PM)) {
5607 pm = &cp->phys.pm0;
5608 dp_scr = SCRIPTA_BA (np, pm0_data);
5610 else {
5611 pm = &cp->phys.pm1;
5612 dp_scr = SCRIPTA_BA (np, pm1_data);
5615 hflags &= ~(HF_DP_SAVED);
5617 OUTB (HF_PRT, hflags);
5620 * Set up the new current data pointer.
5621 * ofs < 0 there, and for the next data phase, we
5622 * want to transfer part of the data of the sg entry
5623 * corresponding to index dp_sg-1 prior to returning
5624 * to the main data script.
5626 pm->ret = cpu_to_scr(dp_ret);
5627 tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
5628 tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
5629 pm->sg.addr = cpu_to_scr(tmp);
5630 pm->sg.size = cpu_to_scr(-dp_ofs);
5632 out_ok:
5633 OUTL (nc_temp, dp_scr);
5634 OUTL_DSP (SCRIPTA_BA (np, clrack));
5635 return;
5637 out_reject:
5638 OUTL_DSP (SCRIPTB_BA (np, msg_bad));
5643 * chip calculation of the data residual.
5645 * As I used to say, the requirement of data residual
5646 * in SCSI is broken, useless and cannot be achieved
5647 * without huge complexity.
5648 * But most OSes and even the official CAM require it.
5649 * When stupidity happens to be so widely spread inside
5650 * a community, it gets hard to convince.
5652 * Anyway, I don't care, since I am not going to use
5653 * any software that considers this data residual as
5654 * a relevant information. :)
5657 static int sym_compute_residual(hcb_p np, ccb_p cp)
5659 int dp_sg, dp_sgmin, resid = 0;
5660 int dp_ofs = 0;
5663 * Check for some data lost or just thrown away.
5664 * We are not required to be quite accurate in this
5665 * situation. Btw, if we are odd for output and the
5666 * device claims some more data, it may well happen
5667 * that our residual is zero. :-)
5669 if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
5670 if (cp->xerr_status & XE_EXTRA_DATA)
5671 resid -= cp->extra_bytes;
5672 if (cp->xerr_status & XE_SODL_UNRUN)
5673 ++resid;
5674 if (cp->xerr_status & XE_SWIDE_OVRUN)
5675 --resid;
5679 * If all data has been transferred,
5680 * there is no residual.
5682 if (cp->phys.head.lastp == cp->phys.head.goalp)
5683 return resid;
5686 * If no data transfer occurs, or if the data
5687 * pointer is weird, return full residual.
5689 if (cp->startp == cp->phys.head.lastp ||
5690 sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp),
5691 &dp_ofs) < 0) {
5692 return cp->data_len;
5696 * If we were auto-sensing, then we are done.
5698 if (cp->host_flags & HF_SENSE) {
5699 return -dp_ofs;
5703 * We are now fully comfortable with the computation
5704 * of the data residual (2's complement).
5706 dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
5707 resid = -cp->ext_ofs;
5708 for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) {
5709 u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
5710 resid += (tmp & 0xffffff);
5714 * Hopefully, the result is not too wrong.
5716 return resid;
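/*
 * Continuing the worked example given before sym_evaluate_dp():
 * with the extreme pointer 0x400 bytes into the last of two
 * 0x1000-byte entries, ext_sg = SYM_CONF_MAX_SG and
 * ext_ofs = -0xc00, the loop above adds nothing more, and the
 * reported residual is 0xc00 bytes out of the 0x2000 requested.
 */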
5720 * Print out the content of a SCSI message.
5723 static int sym_show_msg (u_char * msg)
5725 u_char i;
5726 kprintf ("%x",*msg);
5727 if (*msg==M_EXTENDED) {
5728 for (i=1;i<8;i++) {
5729 if (i-1>msg[1]) break;
5730 kprintf ("-%x",msg[i]);
5732 return (i+1);
5733 } else if ((*msg & 0xf0) == 0x20) {
5734 kprintf ("-%x",msg[1]);
5735 return (2);
5737 return (1);
5740 static void sym_print_msg (ccb_p cp, char *label, u_char *msg)
5742 PRINT_ADDR(cp);
5743 if (label)
5744 kprintf ("%s: ", label);
5746 (void) sym_show_msg (msg);
5747 kprintf (".\n");
5751 * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER.
5753 * When we try to negotiate, we append the negotiation message
5754 * to the identify and (maybe) simple tag message.
5755 * The host status field is set to HS_NEGOTIATE to mark this
5756 * situation.
5758 * If the target doesn't answer this message immediately
5759 * (as required by the standard), the SIR_NEGO_FAILED interrupt
5760 * will be raised eventually.
5761 * The handler removes the HS_NEGOTIATE status, and sets the
5762 * negotiated value to the default (async / nowide).
5764 * If we receive a matching answer immediately, we check it
5765 * for validity, and set the values.
5767 * If we receive a Reject message immediately, we assume the
5768 * negotiation has failed, and fall back to standard values.
5770 * If we receive a negotiation message while not in HS_NEGOTIATE
5771 * state, it's a target initiated negotiation. We prepare a
5772 * (hopefully) valid answer, set our parameters, and send back
5773 * this answer to the target.
5775 * If the target doesn't fetch the answer (no message out phase),
5776 * we assume the negotiation has failed, and fall back to default
5777 * settings (SIR_NEGO_PROTO interrupt).
5779 * When we set the values, we adjust them in all ccbs belonging
5780 * to this target, in the controller's register, and in the "phys"
5781 * field of the controller's struct sym_hcb.
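*
* As a reminder of the wire format (illustrative values): the
* SDTR we send out is the 5-byte extended message
* { M_EXTENDED, 3, M_X_SYNC_REQ, period factor, offset },
* exactly as built in np->msgout[] by sym_sync_nego() below.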
5785 * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message.
5787 static void sym_sync_nego(hcb_p np, tcb_p tp, ccb_p cp)
5789 u_char chg, ofs, per, fak, div;
5790 int req = 1;
5793 * Synchronous request message received.
5795 if (DEBUG_FLAGS & DEBUG_NEGO) {
5796 sym_print_msg(cp, "sync msgin", np->msgin);
5800 * request or answer ?
5802 if (INB (HS_PRT) == HS_NEGOTIATE) {
5803 OUTB (HS_PRT, HS_BUSY);
5804 if (cp->nego_status && cp->nego_status != NS_SYNC)
5805 goto reject_it;
5806 req = 0;
5810 * get requested values.
5812 chg = 0;
5813 per = np->msgin[3];
5814 ofs = np->msgin[4];
5817 * check values against our limits.
5819 if (ofs) {
5820 if (ofs > np->maxoffs)
5821 {chg = 1; ofs = np->maxoffs;}
5822 if (req) {
5823 if (ofs > tp->tinfo.user.offset)
5824 {chg = 1; ofs = tp->tinfo.user.offset;}
5828 if (ofs) {
5829 if (per < np->minsync)
5830 {chg = 1; per = np->minsync;}
5831 if (req) {
5832 if (per < tp->tinfo.user.period)
5833 {chg = 1; per = tp->tinfo.user.period;}
5837 div = fak = 0;
5838 if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0)
5839 goto reject_it;
5841 if (DEBUG_FLAGS & DEBUG_NEGO) {
5842 PRINT_ADDR(cp);
5843 kprintf ("sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n",
5844 ofs, per, div, fak, chg);
5848 * This was an answer message
5850 if (req == 0) {
5851 if (chg) /* Answer wasn't acceptable. */
5852 goto reject_it;
5853 sym_setsync (np, cp, ofs, per, div, fak);
5854 OUTL_DSP (SCRIPTA_BA (np, clrack));
5855 return;
5859 * It was a request. Set value and
5860 * prepare an answer message
5862 sym_setsync (np, cp, ofs, per, div, fak);
5864 np->msgout[0] = M_EXTENDED;
5865 np->msgout[1] = 3;
5866 np->msgout[2] = M_X_SYNC_REQ;
5867 np->msgout[3] = per;
5868 np->msgout[4] = ofs;
5870 cp->nego_status = NS_SYNC;
5872 if (DEBUG_FLAGS & DEBUG_NEGO) {
5873 sym_print_msg(cp, "sync msgout", np->msgout);
5876 np->msgin [0] = M_NOOP;
5878 OUTL_DSP (SCRIPTB_BA (np, sdtr_resp));
5879 return;
5880 reject_it:
5881 sym_setsync (np, cp, 0, 0, 0, 0);
5882 OUTL_DSP (SCRIPTB_BA (np, msg_bad));
5886 * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message.
5888 static void sym_ppr_nego(hcb_p np, tcb_p tp, ccb_p cp)
5890 u_char chg, ofs, per, fak, dt, div, wide;
5891 int req = 1;
5894 * PPR request message received.
5896 if (DEBUG_FLAGS & DEBUG_NEGO) {
5897 sym_print_msg(cp, "ppr msgin", np->msgin);
5901 * get requested values.
5903 chg = 0;
5904 per = np->msgin[3];
5905 ofs = np->msgin[5];
5906 wide = np->msgin[6];
5907 dt = np->msgin[7] & PPR_OPT_DT;
5910 * request or answer ?
5912 if (INB (HS_PRT) == HS_NEGOTIATE) {
5913 OUTB (HS_PRT, HS_BUSY);
5914 if (cp->nego_status && cp->nego_status != NS_PPR)
5915 goto reject_it;
5916 req = 0;
5920 * check values against our limits.
5922 if (wide > np->maxwide)
5923 {chg = 1; wide = np->maxwide;}
5924 if (!wide || !(np->features & FE_ULTRA3))
5925 dt &= ~PPR_OPT_DT;
5926 if (req) {
5927 if (wide > tp->tinfo.user.width)
5928 {chg = 1; wide = tp->tinfo.user.width;}
5931 if (!(np->features & FE_U3EN)) /* Broken U3EN bit not supported */
5932 dt &= ~PPR_OPT_DT;
5934 if (dt != (np->msgin[7] & PPR_OPT_MASK)) chg = 1;
5936 if (ofs) {
5937 if (dt) {
5938 if (ofs > np->maxoffs_dt)
5939 {chg = 1; ofs = np->maxoffs_dt;}
5941 else if (ofs > np->maxoffs)
5942 {chg = 1; ofs = np->maxoffs;}
5943 if (req) {
5944 if (ofs > tp->tinfo.user.offset)
5945 {chg = 1; ofs = tp->tinfo.user.offset;}
5949 if (ofs) {
5950 if (dt) {
5951 if (per < np->minsync_dt)
5952 {chg = 1; per = np->minsync_dt;}
5954 else if (per < np->minsync)
5955 {chg = 1; per = np->minsync;}
5956 if (req) {
5957 if (per < tp->tinfo.user.period)
5958 {chg = 1; per = tp->tinfo.user.period;}
5962 div = fak = 0;
5963 if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0)
5964 goto reject_it;
5966 if (DEBUG_FLAGS & DEBUG_NEGO) {
5967 PRINT_ADDR(cp);
5968 kprintf ("ppr: "
5969 "dt=%x ofs=%d per=%d wide=%d div=%d fak=%d chg=%d.\n",
5970 dt, ofs, per, wide, div, fak, chg);
5974 * It was an answer.
5976 if (req == 0) {
5977 if (chg) /* Answer wasn't acceptable */
5978 goto reject_it;
5979 sym_setpprot (np, cp, dt, ofs, per, wide, div, fak);
5980 OUTL_DSP (SCRIPTA_BA (np, clrack));
5981 return;
5985 * It was a request. Set value and
5986 * prepare an answer message
5988 sym_setpprot (np, cp, dt, ofs, per, wide, div, fak);
5990 np->msgout[0] = M_EXTENDED;
5991 np->msgout[1] = 6;
5992 np->msgout[2] = M_X_PPR_REQ;
5993 np->msgout[3] = per;
5994 np->msgout[4] = 0;
5995 np->msgout[5] = ofs;
5996 np->msgout[6] = wide;
5997 np->msgout[7] = dt;
5999 cp->nego_status = NS_PPR;
6001 if (DEBUG_FLAGS & DEBUG_NEGO) {
6002 sym_print_msg(cp, "ppr msgout", np->msgout);
6005 np->msgin [0] = M_NOOP;
6007 OUTL_DSP (SCRIPTB_BA (np, ppr_resp));
6008 return;
6009 reject_it:
6010 sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0);
6011 OUTL_DSP (SCRIPTB_BA (np, msg_bad));
6013 * If it was a device response that should result in
6014 * ST, we may want to try a legacy negotiation later.
6016 if (!req && !dt) {
6017 tp->tinfo.goal.options = 0;
6018 tp->tinfo.goal.width = wide;
6019 tp->tinfo.goal.period = per;
6020 tp->tinfo.goal.offset = ofs;
6022 return;
6026 * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message.
6028 static void sym_wide_nego(hcb_p np, tcb_p tp, ccb_p cp)
6030 u_char chg, wide;
6031 int req = 1;
6034 * Wide request message received.
6036 if (DEBUG_FLAGS & DEBUG_NEGO) {
6037 sym_print_msg(cp, "wide msgin", np->msgin);
6041 * Is it a request from the device?
6043 if (INB (HS_PRT) == HS_NEGOTIATE) {
6044 OUTB (HS_PRT, HS_BUSY);
6045 if (cp->nego_status && cp->nego_status != NS_WIDE)
6046 goto reject_it;
6047 req = 0;
6051 * get requested values.
6053 chg = 0;
6054 wide = np->msgin[3];
6057 * check values against driver limits.
6059 if (wide > np->maxwide)
6060 {chg = 1; wide = np->maxwide;}
6061 if (req) {
6062 if (wide > tp->tinfo.user.width)
6063 {chg = 1; wide = tp->tinfo.user.width;}
6066 if (DEBUG_FLAGS & DEBUG_NEGO) {
6067 PRINT_ADDR(cp);
6068 kprintf ("wdtr: wide=%d chg=%d.\n", wide, chg);
6072 * This was an answer message
6074 if (req == 0) {
6075 if (chg) /* Answer wasn't acceptable. */
6076 goto reject_it;
6077 sym_setwide (np, cp, wide);
6080 * Negotiate for SYNC immediately after WIDE response.
6082 * This allows negotiating for both WIDE and SYNC on
6082 * a single SCSI command (Suggested by Justin Gibbs).
6084 if (tp->tinfo.goal.offset) {
6085 np->msgout[0] = M_EXTENDED;
6086 np->msgout[1] = 3;
6087 np->msgout[2] = M_X_SYNC_REQ;
6088 np->msgout[3] = tp->tinfo.goal.period;
6089 np->msgout[4] = tp->tinfo.goal.offset;
6091 if (DEBUG_FLAGS & DEBUG_NEGO) {
6092 sym_print_msg(cp, "sync msgout", np->msgout);
6095 cp->nego_status = NS_SYNC;
6096 OUTB (HS_PRT, HS_NEGOTIATE);
6097 OUTL_DSP (SCRIPTB_BA (np, sdtr_resp));
6098 return;
6101 OUTL_DSP (SCRIPTA_BA (np, clrack));
6102 return;
6106 * It was a request, set value and
6107 * prepare an answer message
6109 sym_setwide (np, cp, wide);
6111 np->msgout[0] = M_EXTENDED;
6112 np->msgout[1] = 2;
6113 np->msgout[2] = M_X_WIDE_REQ;
6114 np->msgout[3] = wide;
6116 np->msgin [0] = M_NOOP;
6118 cp->nego_status = NS_WIDE;
6120 if (DEBUG_FLAGS & DEBUG_NEGO) {
6121 sym_print_msg(cp, "wide msgout", np->msgout);
6124 OUTL_DSP (SCRIPTB_BA (np, wdtr_resp));
6125 return;
6126 reject_it:
6127 OUTL_DSP (SCRIPTB_BA (np, msg_bad));
6131 * Reset SYNC or WIDE to default settings.
6133 * Called when a negotiation does not succeed either
6134 * on rejection or on protocol error.
6136 * If it was a PPR that caused problems, we may want to
6137 * try a legacy negotiation later.
6139 static void sym_nego_default(hcb_p np, tcb_p tp, ccb_p cp)
6142 * any error in negotiation:
6143 * fall back to default mode.
6145 switch (cp->nego_status) {
6146 case NS_PPR:
6147 #if 0
6148 sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0);
6149 #else
6150 tp->tinfo.goal.options = 0;
6151 if (tp->tinfo.goal.period < np->minsync)
6152 tp->tinfo.goal.period = np->minsync;
6153 if (tp->tinfo.goal.offset > np->maxoffs)
6154 tp->tinfo.goal.offset = np->maxoffs;
6155 #endif
6156 break;
6157 case NS_SYNC:
6158 sym_setsync (np, cp, 0, 0, 0, 0);
6159 break;
6160 case NS_WIDE:
6161 sym_setwide (np, cp, 0);
6162 break;
6164 np->msgin [0] = M_NOOP;
6165 np->msgout[0] = M_NOOP;
6166 cp->nego_status = 0;
6170 * chip handler for MESSAGE REJECT received in response to
6171 * a WIDE or SYNCHRONOUS negotiation.
6173 static void sym_nego_rejected(hcb_p np, tcb_p tp, ccb_p cp)
6175 sym_nego_default(np, tp, cp);
6176 OUTB (HS_PRT, HS_BUSY);
6180 * chip exception handler for programmed interrupts.
6182 void sym_int_sir (hcb_p np)
6184 u_char num = INB (nc_dsps);
6185 u32 dsa = INL (nc_dsa);
6186 ccb_p cp = sym_ccb_from_dsa(np, dsa);
6187 u_char target = INB (nc_sdid) & 0x0f;
6188 tcb_p tp = &np->target[target];
6189 int tmp;
6191 if (DEBUG_FLAGS & DEBUG_TINY) kprintf ("I#%d", num);
6193 switch (num) {
6195 * Command has been completed with error condition
6196 * or has been auto-sensed.
6198 case SIR_COMPLETE_ERROR:
6199 sym_complete_error(np, cp);
6200 return;
6202 * The C code is currently trying to recover from something.
6203 * Typically, the user wants to abort some command.
6205 case SIR_SCRIPT_STOPPED:
6206 case SIR_TARGET_SELECTED:
6207 case SIR_ABORT_SENT:
6208 sym_sir_task_recovery(np, num);
6209 return;
6211 * The device didn't go to MSG OUT phase after having
6212 * been selected with ATN. We do not want to handle
6213 * that.
6215 case SIR_SEL_ATN_NO_MSG_OUT:
6216 kprintf ("%s:%d: No MSG OUT phase after selection with ATN.\n",
6217 sym_name (np), target);
6218 goto out_stuck;
6220 * The device didn't switch to MSG IN phase after
6221 * having reselected the initiator.
6223 case SIR_RESEL_NO_MSG_IN:
6224 kprintf ("%s:%d: No MSG IN phase after reselection.\n",
6225 sym_name (np), target);
6226 goto out_stuck;
6228 * After reselection, the device sent a message that wasn't
6229 * an IDENTIFY.
6231 case SIR_RESEL_NO_IDENTIFY:
6232 kprintf ("%s:%d: No IDENTIFY after reselection.\n",
6233 sym_name (np), target);
6234 goto out_stuck;
6236 * The device reselected a LUN we do not know about.
6238 case SIR_RESEL_BAD_LUN:
6239 np->msgout[0] = M_RESET;
6240 goto out;
6242 * The device reselected for an untagged nexus and we
6243 * haven't any.
6245 case SIR_RESEL_BAD_I_T_L:
6246 np->msgout[0] = M_ABORT;
6247 goto out;
6249 * The device reselected for a tagged nexus that we do not
6250 * have.
6252 case SIR_RESEL_BAD_I_T_L_Q:
6253 np->msgout[0] = M_ABORT_TAG;
6254 goto out;
6256 * The SCRIPTS let us know that the device has grabbed
6257 * our message and will abort the job.
6259 case SIR_RESEL_ABORTED:
6260 np->lastmsg = np->msgout[0];
6261 np->msgout[0] = M_NOOP;
6262 kprintf ("%s:%d: message %x sent on bad reselection.\n",
6263 sym_name (np), target, np->lastmsg);
6264 goto out;
6266 * The SCRIPTS let us know that a message has been
6267 * successfully sent to the device.
6269 case SIR_MSG_OUT_DONE:
6270 np->lastmsg = np->msgout[0];
6271 np->msgout[0] = M_NOOP;
6272 /* Should we really care about that? */
6273 if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
6274 if (cp) {
6275 cp->xerr_status &= ~XE_PARITY_ERR;
6276 if (!cp->xerr_status)
6277 OUTOFFB (HF_PRT, HF_EXT_ERR);
6280 goto out;
6282 * The device didn't send a GOOD SCSI status.
6283 * We may have some work to do prior to allowing
6284 * the SCRIPTS processor to continue.
6286 case SIR_BAD_SCSI_STATUS:
6287 if (!cp)
6288 goto out;
6289 sym_sir_bad_scsi_status(np, num, cp);
6290 return;
6292 * We are asked by the SCRIPTS to prepare a
6293 * REJECT message.
6295 case SIR_REJECT_TO_SEND:
6296 sym_print_msg(cp, "M_REJECT to send for ", np->msgin);
6297 np->msgout[0] = M_REJECT;
6298 goto out;
6300 * We have been ODD at the end of a DATA IN
6301 * transfer and the device didn't send an
6302 * IGNORE WIDE RESIDUE message.
6303 * It is a data overrun condition.
6305 case SIR_SWIDE_OVERRUN:
6306 if (cp) {
6307 OUTONB (HF_PRT, HF_EXT_ERR);
6308 cp->xerr_status |= XE_SWIDE_OVRUN;
6310 goto out;
6312 * We have been ODD at the end of a DATA OUT
6313 * transfer.
6314 * It is a data underrun condition.
6316 case SIR_SODL_UNDERRUN:
6317 if (cp) {
6318 OUTONB (HF_PRT, HF_EXT_ERR);
6319 cp->xerr_status |= XE_SODL_UNRUN;
6321 goto out;
6323 * The device wants us to transfer more data than
6324 * expected or in the wrong direction.
6325 * The number of extra bytes is in scratcha.
6326 * It is a data overrun condition.
6328 case SIR_DATA_OVERRUN:
6329 if (cp) {
6330 OUTONB (HF_PRT, HF_EXT_ERR);
6331 cp->xerr_status |= XE_EXTRA_DATA;
6332 cp->extra_bytes += INL (nc_scratcha);
6334 goto out;
6336 * The device switched to an illegal phase (4/5).
6338 case SIR_BAD_PHASE:
6339 if (cp) {
6340 OUTONB (HF_PRT, HF_EXT_ERR);
6341 cp->xerr_status |= XE_BAD_PHASE;
6343 goto out;
6345 * We received a message.
6347 case SIR_MSG_RECEIVED:
6348 if (!cp)
6349 goto out_stuck;
6350 switch (np->msgin [0]) {
6352 * We received an extended message.
6353 * We handle MODIFY DATA POINTER, SDTR, WDTR, PPR
6354 * and reject all other extended messages.
6356 case M_EXTENDED:
6357 switch (np->msgin [2]) {
6358 case M_X_MODIFY_DP:
6359 if (DEBUG_FLAGS & DEBUG_POINTER)
6360 sym_print_msg(cp,"modify DP",np->msgin);
6361 tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
6362 (np->msgin[5]<<8) + (np->msgin[6]);
6363 sym_modify_dp(np, tp, cp, tmp);
6364 return;
6365 case M_X_SYNC_REQ:
6366 sym_sync_nego(np, tp, cp);
6367 return;
6368 case M_X_PPR_REQ:
6369 sym_ppr_nego(np, tp, cp);
6370 return;
6371 case M_X_WIDE_REQ:
6372 sym_wide_nego(np, tp, cp);
6373 return;
6374 default:
6375 goto out_reject;
6377 break;
6379 * We received a 1 or 2 byte message not handled by SCRIPTS.
6380 * We are only expecting MESSAGE REJECT and IGNORE WIDE
6381 * RESIDUE messages that haven't been anticipated by
6382 * SCRIPTS on SWIDE full condition. Unanticipated IGNORE
6383 * WIDE RESIDUE messages are aliased as MODIFY DP (-1).
6385 case M_IGN_RESIDUE:
6386 if (DEBUG_FLAGS & DEBUG_POINTER)
6387 sym_print_msg(cp,"ign wide residue", np->msgin);
6388 sym_modify_dp(np, tp, cp, -1);
6389 return;
6390 case M_REJECT:
6391 if (INB (HS_PRT) == HS_NEGOTIATE)
6392 sym_nego_rejected(np, tp, cp);
6393 else {
6394 PRINT_ADDR(cp);
6395 kprintf ("M_REJECT received (%x:%x).\n",
6396 scr_to_cpu(np->lastmsg), np->msgout[0]);
6398 goto out_clrack;
6399 break;
6400 default:
6401 goto out_reject;
6403 break;
6405 * We received an unknown message.
6406 * Ignore all MSG IN phases and reject it.
6408 case SIR_MSG_WEIRD:
6409 sym_print_msg(cp, "WEIRD message received", np->msgin);
6410 OUTL_DSP (SCRIPTB_BA (np, msg_weird));
6411 return;
6413 * Negotiation failed.
6414 * Target does not send us the reply.
6415 * Remove the HS_NEGOTIATE status.
6417 case SIR_NEGO_FAILED:
6418 OUTB (HS_PRT, HS_BUSY);
6420 * Negotiation failed.
6421 * Target does not want an answer message.
6423 case SIR_NEGO_PROTO:
6424 sym_nego_default(np, tp, cp);
6425 goto out;
6428 out:
6429 OUTONB_STD ();
6430 return;
6431 out_reject:
6432 OUTL_DSP (SCRIPTB_BA (np, msg_bad));
6433 return;
6434 out_clrack:
6435 OUTL_DSP (SCRIPTA_BA (np, clrack));
6436 return;
6437 out_stuck:
6442 * Acquire a control block
6444 static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order)
6446 tcb_p tp = &np->target[tn];
6447 lcb_p lp = sym_lp(np, tp, ln);
6448 u_short tag = NO_TAG;
6449 SYM_QUEHEAD *qp;
6450 ccb_p cp = (ccb_p) 0;
6453 * Look for a free CCB
6455 if (sym_que_empty(&np->free_ccbq))
6456 (void) sym_alloc_ccb(np);
6457 qp = sym_remque_head(&np->free_ccbq);
6458 if (!qp)
6459 goto out;
6460 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
6463 * If the LCB is not yet available and the LUN
6464 * has been probed ok, try to allocate the LCB.
6466 if (!lp && sym_is_bit(tp->lun_map, ln)) {
6467 lp = sym_alloc_lcb(np, tn, ln);
6468 if (!lp)
6469 goto out_free;
6473 * If the LCB is not available here, then the
6474 * logical unit is not yet discovered. For those,
6475 * we only accept 1 SCSI IO per logical unit,
6476 * since we cannot allow disconnections.
6478 if (!lp) {
6479 if (!sym_is_bit(tp->busy0_map, ln))
6480 sym_set_bit(tp->busy0_map, ln);
6481 else
6482 goto out_free;
6483 } else {
6485 * If we have been asked for a tagged command.
6487 if (tag_order) {
6489 * Debugging purpose.
6491 assert(lp->busy_itl == 0);
6493 * Allocate resources for tags if not already done.
6495 if (!lp->cb_tags) {
6496 sym_alloc_lcb_tags(np, tn, ln);
6497 if (!lp->cb_tags)
6498 goto out_free;
6501 * Get a tag for this SCSI IO and set up
6502 * the CCB bus address for reselection,
6503 * and count it for this LUN.
6504 * Toggle reselect path to tagged.
6506 if (lp->busy_itlq < SYM_CONF_MAX_TASK) {
6507 tag = lp->cb_tags[lp->ia_tag];
6508 if (++lp->ia_tag == SYM_CONF_MAX_TASK)
6509 lp->ia_tag = 0;
6510 lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba);
6511 ++lp->busy_itlq;
6512 lp->head.resel_sa =
6513 cpu_to_scr(SCRIPTA_BA (np, resel_tag));
6515 else
6516 goto out_free;
6519 * This command will not be tagged.
6520 * If we already have either a tagged or untagged
6521 * one, refuse to overlap this untagged one.
6523 else {
6525 * Debugging purpose.
6527 assert(lp->busy_itl == 0 && lp->busy_itlq == 0);
6529 * Count this nexus for this LUN.
6530 * Set up the CCB bus address for reselection.
6531 * Toggle reselect path to untagged.
6533 if (++lp->busy_itl == 1) {
6534 lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
6535 lp->head.resel_sa =
6536 cpu_to_scr(SCRIPTA_BA (np, resel_no_tag));
6538 else
6539 goto out_free;
6543 * Put the CCB into the busy queue.
6545 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
6548 * Remember all the information needed to free this CCB.
6550 cp->to_abort = 0;
6551 cp->tag = tag;
6552 cp->target = tn;
6553 cp->lun = ln;
6555 if (DEBUG_FLAGS & DEBUG_TAGS) {
6556 PRINT_LUN(np, tn, ln);
6557 kprintf ("ccb @%p using tag %d.\n", cp, tag);
6560 out:
6561 return cp;
6562 out_free:
6563 sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
6564 return (ccb_p) 0;
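/*
 * A note on the tag bookkeeping above: cb_tags[] is a circular buffer
 * of free tag numbers.  ia_tag is the allocation index and if_tag
 * (used by sym_free_ccb below) is the release index; both wrap at
 * SYM_CONF_MAX_TASK.  busy_itlq counts tagged nexuses and busy_itl
 * untagged ones, so a tag is only handed out while busy_itlq stays
 * below SYM_CONF_MAX_TASK, and an untagged command is refused as soon
 * as any other nexus is pending on the LUN.
 */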
6568 * Release one control block
6570 static void sym_free_ccb (hcb_p np, ccb_p cp)
6572 tcb_p tp = &np->target[cp->target];
6573 lcb_p lp = sym_lp(np, tp, cp->lun);
6575 if (DEBUG_FLAGS & DEBUG_TAGS) {
6576 PRINT_LUN(np, cp->target, cp->lun);
6577 kprintf ("ccb @%p freeing tag %d.\n", cp, cp->tag);
6581 * If LCB available,
6583 if (lp) {
6585 * If tagged, release the tag, set the reselect path
6587 if (cp->tag != NO_TAG) {
6589 * Free the tag value.
6591 lp->cb_tags[lp->if_tag] = cp->tag;
6592 if (++lp->if_tag == SYM_CONF_MAX_TASK)
6593 lp->if_tag = 0;
6595 * Make the reselect path invalid,
6596 * and uncount this CCB.
6598 lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba);
6599 --lp->busy_itlq;
6600 } else { /* Untagged */
6602 * Make the reselect path invalid,
6603 * and uncount this CCB.
6605 lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
6606 --lp->busy_itl;
6609 * If no JOB active, make the LUN reselect path invalid.
6611 if (lp->busy_itlq == 0 && lp->busy_itl == 0)
6612 lp->head.resel_sa =
6613 cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));
6616 * Otherwise, we only accept 1 IO per LUN.
6617 * Clear the bit that keeps track of this IO.
6619 else
6620 sym_clr_bit(tp->busy0_map, cp->lun);
6623 * We do not queue more than 1 ccb per target
6624 * with negotiation at any time. If this ccb was
6625 * used for negotiation, clear this info in the tcb.
6627 if (cp == tp->nego_cp)
6628 tp->nego_cp = 0;
6630 #ifdef SYM_CONF_IARB_SUPPORT
6632 * If we just completed the last queued CCB,
6633 * clear this info that is no longer relevant.
6635 if (cp == np->last_cp)
6636 np->last_cp = 0;
6637 #endif
6639 #ifdef FreeBSD_Bus_Dma_Abstraction
6641 * Unmap user data from DMA map if needed.
6643 if (cp->dmamapped) {
6644 bus_dmamap_unload(np->data_dmat, cp->dmamap);
6645 cp->dmamapped = 0;
6647 #endif
6650 * Make this CCB available.
6652 cp->cam_ccb = 0;
6653 cp->host_status = HS_IDLE;
6654 sym_remque(&cp->link_ccbq);
6655 sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
6659 * Allocate a CCB from memory and initialize its fixed part.
6661 static ccb_p sym_alloc_ccb(hcb_p np)
6663 ccb_p cp = 0;
6664 int hcode;
6667 * Prevent allocating more CCBs than we can
6668 * queue to the controller.
6670 if (np->actccbs >= SYM_CONF_MAX_START)
6671 return 0;
6674 * Allocate memory for this CCB.
6676 cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB");
6677 if (!cp)
6678 goto out_free;
6681 * Allocate a bounce buffer for sense data.
6683 cp->sns_bbuf = sym_calloc_dma(SYM_SNS_BBUF_LEN, "SNS_BBUF");
6684 if (!cp->sns_bbuf)
6685 goto out_free;
6688 * Allocate a map for the DMA of user data.
6690 #ifdef FreeBSD_Bus_Dma_Abstraction
6691 if (bus_dmamap_create(np->data_dmat, 0, &cp->dmamap))
6692 goto out_free;
6693 #endif
6695 * Count it.
6697 np->actccbs++;
6700 * Compute the bus address of this ccb.
6702 cp->ccb_ba = vtobus(cp);
6705 * Insert this ccb into the hashed list.
6707 hcode = CCB_HASH_CODE(cp->ccb_ba);
6708 cp->link_ccbh = np->ccbh[hcode];
6709 np->ccbh[hcode] = cp;
6712 * Initialize the start and restart actions.
6714 cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, idle));
6715 cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));
6718 * Initialize some other fields.
6720 cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2]));
6723 * Chain into free ccb queue.
6725 sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
6727 return cp;
6728 out_free:
6729 if (cp) {
6730 if (cp->sns_bbuf)
6731 sym_mfree_dma(cp->sns_bbuf,SYM_SNS_BBUF_LEN,"SNS_BBUF");
6732 sym_mfree_dma(cp, sizeof(*cp), "CCB");
6734 return 0;
6738 * Look up a CCB from a DSA value.
6740 static ccb_p sym_ccb_from_dsa(hcb_p np, u32 dsa)
6742 int hcode;
6743 ccb_p cp;
6745 hcode = CCB_HASH_CODE(dsa);
6746 cp = np->ccbh[hcode];
6747 while (cp) {
6748 if (cp->ccb_ba == dsa)
6749 break;
6750 cp = cp->link_ccbh;
6753 return cp;
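/*
 * The lookup above works because each CCB bus address (cp->ccb_ba) is
 * what SCRIPTS loads into the DSA register on reselection, through the
 * itl_task_sa / itlq_tbl entries set up in sym_get_ccb().
 * CCB_HASH_CODE() maps that address to one of the np->ccbh[] buckets,
 * whose CCBs are chained through link_ccbh.
 */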
6757 * Target control block initialisation.
6758 * Nothing important to do at the moment.
6760 static void sym_init_tcb (hcb_p np, u_char tn)
6763 * Check some alignments required by the chip.
6765 assert (((offsetof(struct sym_reg, nc_sxfer) ^
6766 offsetof(struct sym_tcb, head.sval)) &3) == 0);
6767 assert (((offsetof(struct sym_reg, nc_scntl3) ^
6768 offsetof(struct sym_tcb, head.wval)) &3) == 0);
6772 * Lun control block allocation and initialization.
6774 static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln)
6776 tcb_p tp = &np->target[tn];
6777 lcb_p lp = sym_lp(np, tp, ln);
6780 * Already done, just return.
6782 if (lp)
6783 return lp;
6785 * Check against some race.
6787 assert(!sym_is_bit(tp->busy0_map, ln));
6790 * Initialize the target control block if not already done.
6792 sym_init_tcb (np, tn);
6795 * Allocate the LCB bus address array.
6796 * Compute the bus address of this table.
6798 if (ln && !tp->luntbl) {
6799 int i;
6801 tp->luntbl = sym_calloc_dma(256, "LUNTBL");
6802 if (!tp->luntbl)
6803 goto fail;
6804 for (i = 0 ; i < 64 ; i++)
6805 tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
6806 tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl));
6810 * Allocate the table of pointers for LUN(s) > 0, if needed.
6812 if (ln && !tp->lunmp) {
6813 tp->lunmp = sym_calloc(SYM_CONF_MAX_LUN * sizeof(lcb_p),
6814 "LUNMP");
6815 if (!tp->lunmp)
6816 goto fail;
6820 * Allocate the lcb.
6821 * Make it available to the chip.
6823 lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB");
6824 if (!lp)
6825 goto fail;
6826 if (ln) {
6827 tp->lunmp[ln] = lp;
6828 tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
6830 else {
6831 tp->lun0p = lp;
6832 tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
6836 * Let the itl task point to error handling.
6838 lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
6841 * Set the reselect pattern to our default. :)
6843 lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));
6846 * Set user capabilities.
6848 lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
6850 fail:
6851 return lp;
6855 * Allocate LCB resources for tagged command queuing.
6857 static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln)
6859 tcb_p tp = &np->target[tn];
6860 lcb_p lp = sym_lp(np, tp, ln);
6861 int i;
6864 * If LCB not available, try to allocate it.
6866 if (!lp && !(lp = sym_alloc_lcb(np, tn, ln)))
6867 goto fail;
6870 * Allocate the task table and the tag allocation
6871 * circular buffer. We want both or none.
6873 lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
6874 if (!lp->itlq_tbl)
6875 goto fail;
6876 lp->cb_tags = sym_calloc(SYM_CONF_MAX_TASK, "CB_TAGS");
6877 if (!lp->cb_tags) {
6878 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
6879 lp->itlq_tbl = 0;
6880 goto fail;
6884 * Initialize the task table with invalid entries.
6886 for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
6887 lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba);
6890 * Fill up the tag buffer with tag numbers.
6892 for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
6893 lp->cb_tags[i] = i;
6896 * Make the task table available to SCRIPTS,
6897 * and accept tagged commands now.
6899 lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));
6900 fail:
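/*
 * A rough sketch of the per-LUN tagged queuing data set up above:
 *
 *	itlq_tbl[]	one SCRIPTS-visible bus address per possible tag;
 *			unused slots point to np->notask_ba.
 *	cb_tags[]	circular buffer of free tag numbers 0..MAX-1.
 *	itlq_tbl_sa	bus address of itlq_tbl, so that a reselection
 *			by tag leads SCRIPTS to the right CCB.
 */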
6905 * Test the pci bus snoop logic :-(
6907 * Has to be called with interrupts disabled.
6909 #ifndef SYM_CONF_IOMAPPED
6910 static int sym_regtest (hcb_p np)
6912 volatile u32 data;
6914 * Chip registers may NOT be cached.
6915 * Write 0xffffffff to a read-only register area,
6916 * and try to read it back.
6918 data = 0xffffffff;
6919 OUTL_OFF(offsetof(struct sym_reg, nc_dstat), data);
6920 data = INL_OFF(offsetof(struct sym_reg, nc_dstat));
6921 #if 1
6922 if (data == 0xffffffff) {
6923 #else
6924 if ((data & 0xe2f0fffd) != 0x02000080) {
6925 #endif
6926 kprintf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
6927 (unsigned) data);
6928 return (0x10);
6930 return (0);
6932 #endif
6934 static int sym_snooptest (hcb_p np)
6936 u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat;
6937 int i, err=0;
6938 #ifndef SYM_CONF_IOMAPPED
6939 err |= sym_regtest (np);
6940 if (err) return (err);
6941 #endif
6942 restart_test:
6944 * Enable Master Parity Checking as we intend
6945 * to enable it for normal operations.
6947 OUTB (nc_ctest4, (np->rv_ctest4 & MPEE));
6949 * init
6951 pc = SCRIPTB0_BA (np, snooptest);
6952 host_wr = 1;
6953 sym_wr = 2;
6955 * Set memory and register.
6957 np->cache = cpu_to_scr(host_wr);
6958 OUTL (nc_temp, sym_wr);
6960 * Start script (exchange values)
6962 OUTL (nc_dsa, np->hcb_ba);
6963 OUTL_DSP (pc);
6965 * Wait 'til done (with timeout)
6967 for (i=0; i<SYM_SNOOP_TIMEOUT; i++)
6968 if (INB(nc_istat) & (INTF|SIP|DIP))
6969 break;
6970 if (i>=SYM_SNOOP_TIMEOUT) {
6971 kprintf ("CACHE TEST FAILED: timeout.\n");
6972 return (0x20);
6975 * Check for fatal DMA errors.
6977 dstat = INB (nc_dstat);
6978 #if 1 /* Band-aid for broken hardware that fails PCI parity */
6979 if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) {
6980 kprintf ("%s: PCI DATA PARITY ERROR DETECTED - "
6981 "DISABLING MASTER DATA PARITY CHECKING.\n",
6982 sym_name(np));
6983 np->rv_ctest4 &= ~MPEE;
6984 goto restart_test;
6986 #endif
6987 if (dstat & (MDPE|BF|IID)) {
6988 kprintf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat);
6989 return (0x80);
6992 * Save termination position.
6994 pc = INL (nc_dsp);
6996 * Read memory and register.
6998 host_rd = scr_to_cpu(np->cache);
6999 sym_rd = INL (nc_scratcha);
7000 sym_bk = INL (nc_temp);
7003 * Check termination position.
7005 if (pc != SCRIPTB0_BA (np, snoopend)+8) {
7006 kprintf ("CACHE TEST FAILED: script execution failed.\n");
7007 kprintf ("start=%08lx, pc=%08lx, end=%08lx\n",
7008 (u_long) SCRIPTB0_BA (np, snooptest), (u_long) pc,
7009 (u_long) SCRIPTB0_BA (np, snoopend) +8);
7010 return (0x40);
7013 * Show results.
7015 if (host_wr != sym_rd) {
7016 kprintf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n",
7017 (int) host_wr, (int) sym_rd);
7018 err |= 1;
7020 if (host_rd != sym_wr) {
7021 kprintf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n",
7022 (int) sym_wr, (int) host_rd);
7023 err |= 2;
7025 if (sym_bk != sym_wr) {
7026 kprintf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n",
7027 (int) sym_wr, (int) sym_bk);
7028 err |= 4;
7031 return (err);
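/*
 * Summary of the snoop test above: the host writes host_wr (1) to
 * np->cache and the chip writes sym_wr (2) to its TEMP register, then
 * the snooptest SCRIPTS exchanges the two values.  In the returned
 * error mask, bit 0 means the chip read stale host data, bit 1 means
 * the host read stale chip data, and bit 2 means the chip failed to
 * read back its own write - any of which indicates that PCI snooping
 * between CPU caches and chip DMA is not working.
 */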
7035 * Determine the chip's clock frequency.
7037 * This is essential for the negotiation of the synchronous
7038 * transfer rate.
7040 * Note: we have to return the correct value.
7041 * THERE IS NO SAFE DEFAULT VALUE.
7043 * Most NCR/SYMBIOS boards are delivered with a 40 MHz clock.
7044 * 53C860 and 53C875 rev. 1 support fast20 transfers but
7045 * do not have a clock doubler and so are provided with an
7046 * 80 MHz clock. All other fast20 boards incorporate a doubler
7047 * and so should be delivered with a 40 MHz clock.
7048 * The recent fast40 chips (895/896/895A/1010) use a 40 MHz base
7049 * clock and provide a clock quadrupler (160 MHz).
7053 * Select SCSI clock frequency
7055 static void sym_selectclock(hcb_p np, u_char scntl3)
7058 * If multiplier not present or not selected, leave here.
7060 if (np->multiplier <= 1) {
7061 OUTB(nc_scntl3, scntl3);
7062 return;
7065 if (sym_verbose >= 2)
7066 kprintf ("%s: enabling clock multiplier\n", sym_name(np));
7068 OUTB(nc_stest1, DBLEN); /* Enable clock multiplier */
7070 * Wait for the LCKFRQ bit to be set if supported by the chip.
7071 * Otherwise wait 20 micro-seconds.
7073 if (np->features & FE_LCKFRQ) {
7074 int i = 20;
7075 while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
7076 UDELAY (20);
7077 if (!i)
7078 kprintf("%s: the chip cannot lock the frequency\n",
7079 sym_name(np));
7080 } else
7081 UDELAY (20);
7082 OUTB(nc_stest3, HSC); /* Halt the scsi clock */
7083 OUTB(nc_scntl3, scntl3);
7084 OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */
7085 OUTB(nc_stest3, 0x00); /* Restart scsi clock */
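/*
 * The sequence above presumably follows the chip manuals: enable the
 * multiplier (DBLEN) and give it time to lock, halt the SCSI clock
 * (HSC) while scntl3 and DBLSEL are programmed, and only then restart
 * the clock, so the divisor change never happens on a running clock.
 */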
7089 * calculate SCSI clock frequency (in KHz)
7091 static unsigned getfreq (hcb_p np, int gen)
7093 unsigned int ms = 0;
7094 unsigned int f;
7097 * Measure GEN timer delay in order
7098 * to calculate SCSI clock frequency
7100 * This code will never execute too
7101 * many loop iterations (if DELAY is
7102 * reasonably correct). It could get
7103 * too low a delay (too high a freq.)
7104 * if the CPU is slow executing the
7105 * loop for some reason (an NMI, for
7106 * example). For this reason, when
7107 * multiple measurements are performed,
7108 * we trust the higher delay
7109 * (i.e. the lower frequency returned).
7111 OUTW (nc_sien , 0); /* mask all scsi interrupts */
7112 (void) INW (nc_sist); /* clear pending scsi interrupt */
7113 OUTB (nc_dien , 0); /* mask all dma interrupts */
7114 (void) INW (nc_sist); /* another one, just to be sure :) */
7115 OUTB (nc_scntl3, 4); /* set pre-scaler to divide by 3 */
7116 OUTB (nc_stime1, 0); /* disable general purpose timer */
7117 OUTB (nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */
7118 while (!(INW(nc_sist) & GEN) && ms++ < 100000)
7119 UDELAY (1000); /* count ms */
7120 OUTB (nc_stime1, 0); /* disable general purpose timer */
7122 * set prescaler to divide by whatever 0 means
7123 * 0 ought to choose divide by 2, but appears
7124 * to set divide by 3.5 mode in my 53c810 ...
7126 OUTB (nc_scntl3, 0);
7129 * adjust for prescaler, and convert into KHz
7131 f = ms ? ((1 << gen) * 4340) / ms : 0;
7133 if (sym_verbose >= 2)
7134 kprintf ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
7135 sym_name(np), gen, ms, f);
7137 return f;
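/*
 * Worked example of the formula above: with gen = 11 the timer
 * nominally counts 1<<11 periods, so a measured delay of about 222 ms
 * gives f = (2048 * 4340) / 222 ~ 40000 KHz, i.e. a 40 MHz clock,
 * while about 111 ms would indicate roughly 80 MHz.
 */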
7140 static unsigned sym_getfreq (hcb_p np)
7142 u_int f1, f2;
7143 int gen = 11;
7145 (void) getfreq (np, gen); /* throw away first result */
7146 f1 = getfreq (np, gen);
7147 f2 = getfreq (np, gen);
7148 if (f1 > f2) f1 = f2; /* trust lower result */
7149 return f1;
7153 * Get/probe chip SCSI clock frequency
7155 static void sym_getclock (hcb_p np, int mult)
7157 unsigned char scntl3 = np->sv_scntl3;
7158 unsigned char stest1 = np->sv_stest1;
7159 unsigned f1;
7162 * For the C10 core, assume 40 MHz.
7164 if (np->features & FE_C10) {
7165 np->multiplier = mult;
7166 np->clock_khz = 40000 * mult;
7167 return;
7170 np->multiplier = 1;
7171 f1 = 40000;
7173 * True with 875/895/896/895A with clock multiplier selected
7175 if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
7176 if (sym_verbose >= 2)
7177 kprintf ("%s: clock multiplier found\n", sym_name(np));
7178 np->multiplier = mult;
7182 * If multiplier not found or scntl3 not 7,5,3,
7183 * reset chip and get frequency from general purpose timer.
7184 * Otherwise trust scntl3 BIOS setting.
7186 if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
7187 OUTB (nc_stest1, 0); /* make sure doubler is OFF */
7188 f1 = sym_getfreq (np);
7190 if (sym_verbose)
7191 kprintf ("%s: chip clock is %uKHz\n", sym_name(np), f1);
7193 if (f1 < 45000) f1 = 40000;
7194 else if (f1 < 55000) f1 = 50000;
7195 else f1 = 80000;
7197 if (f1 < 80000 && mult > 1) {
7198 if (sym_verbose >= 2)
7199 kprintf ("%s: clock multiplier assumed\n",
7200 sym_name(np));
7201 np->multiplier = mult;
7203 } else {
7204 if ((scntl3 & 7) == 3) f1 = 40000;
7205 else if ((scntl3 & 7) == 5) f1 = 80000;
7206 else f1 = 160000;
7208 f1 /= np->multiplier;
7212 * Compute controller synchronous parameters.
7214 f1 *= np->multiplier;
7215 np->clock_khz = f1;
7219 * Get/probe PCI clock frequency
7221 static int sym_getpciclock (hcb_p np)
7223 int f = 0;
7226 * For the C1010-33, this doesn't work.
7227 * For the C1010-66, this will be tested when I have
7228 * such a beast to play with.
7230 if (!(np->features & FE_C10)) {
7231 OUTB (nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */
7232 f = (int) sym_getfreq (np);
7233 OUTB (nc_stest1, 0);
7235 np->pciclk_khz = f;
7237 return f;
7240 /*============= DRIVER ACTION/COMPLETION ====================*/
7243 * Print something that tells about extended errors.
7245 static void sym_print_xerr(ccb_p cp, int x_status)
7247 if (x_status & XE_PARITY_ERR) {
7248 PRINT_ADDR(cp);
7249 kprintf ("unrecovered SCSI parity error.\n");
7251 if (x_status & XE_EXTRA_DATA) {
7252 PRINT_ADDR(cp);
7253 kprintf ("extraneous data discarded.\n");
7255 if (x_status & XE_BAD_PHASE) {
7256 PRINT_ADDR(cp);
7257 kprintf ("illegal scsi phase (4/5).\n");
7259 if (x_status & XE_SODL_UNRUN) {
7260 PRINT_ADDR(cp);
7261 kprintf ("ODD transfer in DATA OUT phase.\n");
7263 if (x_status & XE_SWIDE_OVRUN) {
7264 PRINT_ADDR(cp);
7265 kprintf ("ODD transfer in DATA IN phase.\n");
7270 * Choose the most appropriate CAM status if
7271 * the IO encountered an extended error.
7273 static int sym_xerr_cam_status(int cam_status, int x_status)
7275 if (x_status) {
7276 if (x_status & XE_PARITY_ERR)
7277 cam_status = CAM_UNCOR_PARITY;
7278 else if (x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
7279 cam_status = CAM_DATA_RUN_ERR;
7280 else if (x_status & XE_BAD_PHASE)
7281 cam_status = CAM_REQ_CMP_ERR;
7282 else
7283 cam_status = CAM_REQ_CMP_ERR;
7285 return cam_status;
7289 * Complete execution of a SCSI command with extended
7290 * error, SCSI status error, or having been auto-sensed.
7292 * The SCRIPTS processor is not running there, so we
7293 * can safely access IO registers and remove JOBs from
7294 * the START queue.
7295 * SCRATCHA is assumed to have been loaded with STARTPOS
7296 * before the SCRIPTS called the C code.
7298 static void sym_complete_error (hcb_p np, ccb_p cp)
7300 struct ccb_scsiio *csio;
7301 u_int cam_status;
7302 int i;
7305 * Paranoid check. :)
7307 if (!cp || !cp->cam_ccb)
7308 return;
7310 if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) {
7311 kprintf ("CCB=%lx STAT=%x/%x/%x DEV=%d/%d\n", (unsigned long)cp,
7312 cp->host_status, cp->ssss_status, cp->host_flags,
7313 cp->target, cp->lun);
7314 MDELAY(100);
7318 * Get CAM command pointer.
7320 csio = &cp->cam_ccb->csio;
7323 * Check for extended errors.
7325 if (cp->xerr_status) {
7326 if (sym_verbose)
7327 sym_print_xerr(cp, cp->xerr_status);
7328 if (cp->host_status == HS_COMPLETE)
7329 cp->host_status = HS_COMP_ERR;
7333 * Calculate the residual.
7335 csio->sense_resid = 0;
7336 csio->resid = sym_compute_residual(np, cp);
7338 if (!SYM_CONF_RESIDUAL_SUPPORT) {/* If user does not want residuals */
7339 csio->resid = 0; /* throw them away. :) */
7340 cp->sv_resid = 0;
7343 if (cp->host_flags & HF_SENSE) { /* Auto sense */
7344 csio->scsi_status = cp->sv_scsi_status; /* Restore status */
7345 csio->sense_resid = csio->resid; /* Swap residuals */
7346 csio->resid = cp->sv_resid;
7347 cp->sv_resid = 0;
7348 if (sym_verbose && cp->sv_xerr_status)
7349 sym_print_xerr(cp, cp->sv_xerr_status);
7350 if (cp->host_status == HS_COMPLETE &&
7351 cp->ssss_status == S_GOOD &&
7352 cp->xerr_status == 0) {
7353 cam_status = sym_xerr_cam_status(CAM_SCSI_STATUS_ERROR,
7354 cp->sv_xerr_status);
7355 cam_status |= CAM_AUTOSNS_VALID;
7357 * Bounce back the sense data to user and
7358 * fix the residual.
7360 bzero(&csio->sense_data, csio->sense_len);
7361 bcopy(cp->sns_bbuf, &csio->sense_data,
7362 MIN(csio->sense_len, SYM_SNS_BBUF_LEN));
7363 csio->sense_resid += csio->sense_len;
7364 csio->sense_resid -= SYM_SNS_BBUF_LEN;
7365 #if 0
7367 * If the device reports a UNIT ATTENTION condition
7368 * due to a RESET condition, we should consider all
7369 * disconnect CCBs for this unit as aborted.
7371 if (1) {
7372 u_char *p;
7373 p = (u_char *) csio->sense_data;
7374 if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
7375 sym_clear_tasks(np, CAM_REQ_ABORTED,
7376 cp->target,cp->lun, -1);
7378 #endif
7380 else
7381 cam_status = CAM_AUTOSENSE_FAIL;
7383 else if (cp->host_status == HS_COMPLETE) { /* Bad SCSI status */
7384 csio->scsi_status = cp->ssss_status;
7385 cam_status = CAM_SCSI_STATUS_ERROR;
7387 else if (cp->host_status == HS_SEL_TIMEOUT) /* Selection timeout */
7388 cam_status = CAM_SEL_TIMEOUT;
7389 else if (cp->host_status == HS_UNEXPECTED) /* Unexpected BUS FREE*/
7390 cam_status = CAM_UNEXP_BUSFREE;
7391 else { /* Extended error */
7392 if (sym_verbose) {
7393 PRINT_ADDR(cp);
7394 kprintf ("COMMAND FAILED (%x %x %x).\n",
7395 cp->host_status, cp->ssss_status,
7396 cp->xerr_status);
7398 csio->scsi_status = cp->ssss_status;
7400 * Set the most appropriate value for CAM status.
7402 cam_status = sym_xerr_cam_status(CAM_REQ_CMP_ERR,
7403 cp->xerr_status);
7407 * Dequeue all queued CCBs for that device
7408 * not yet started by SCRIPTS.
7410 i = (INL (nc_scratcha) - np->squeue_ba) / 4;
7411 (void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
7414 * Restart the SCRIPTS processor.
7416 OUTL_DSP (SCRIPTA_BA (np, start));
7418 #ifdef FreeBSD_Bus_Dma_Abstraction
7420 * Synchronize DMA map if needed.
7422 if (cp->dmamapped) {
7423 bus_dmamap_sync(np->data_dmat, cp->dmamap,
7424 (bus_dmasync_op_t)(cp->dmamapped == SYM_DMA_READ ?
7425 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
7427 #endif
7429 * Add this one to the COMP queue.
7430 * Complete all those commands with either error
7431 * or requeue condition.
7433 sym_set_cam_status((union ccb *) csio, cam_status);
7434 sym_remque(&cp->link_ccbq);
7435 sym_insque_head(&cp->link_ccbq, &np->comp_ccbq);
7436 sym_flush_comp_queue(np, 0);
7440 * Complete execution of a successful SCSI command.
7442 * Only successful commands go to the DONE queue,
7443 * since we need to have the SCRIPTS processor
7444 * stopped on any error condition.
7445 * The SCRIPTS processor is running while we are
7446 * completing successful commands.
7448 static void sym_complete_ok (hcb_p np, ccb_p cp)
7450 struct ccb_scsiio *csio;
7451 tcb_p tp;
7452 lcb_p lp;
7455 * Paranoid check. :)
7457 if (!cp || !cp->cam_ccb)
7458 return;
7459 assert (cp->host_status == HS_COMPLETE);
7462 * Get command, target and lun pointers.
7464 csio = &cp->cam_ccb->csio;
7465 tp = &np->target[cp->target];
7466 lp = sym_lp(np, tp, cp->lun);
7469 * Assume device discovered on first success.
7471 if (!lp)
7472 sym_set_bit(tp->lun_map, cp->lun);
7475 * If all data have been transferred and no
7476 * extended error occurred, there is no residual.
7478 csio->resid = 0;
7479 if (cp->phys.head.lastp != cp->phys.head.goalp)
7480 csio->resid = sym_compute_residual(np, cp);
7483 * Wrong transfer residuals may be worse than just always
7484 * returning zero. User can disable this feature from
7485 * sym_conf.h. Residual support is enabled by default.
7487 if (!SYM_CONF_RESIDUAL_SUPPORT)
7488 csio->resid = 0;
7490 #ifdef FreeBSD_Bus_Dma_Abstraction
7492 * Synchronize DMA map if needed.
7494 if (cp->dmamapped) {
7495 bus_dmamap_sync(np->data_dmat, cp->dmamap,
7496 (bus_dmasync_op_t)(cp->dmamapped == SYM_DMA_READ ?
7497 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
7499 #endif
7501 * Set status and complete the command.
7503 csio->scsi_status = cp->ssss_status;
7504 sym_set_cam_status((union ccb *) csio, CAM_REQ_CMP);
7505 sym_free_ccb (np, cp);
7506 sym_xpt_done(np, (union ccb *) csio);
7510 * Our timeout handler.
7512 static void sym_timeout1(void *arg)
7514 union ccb *ccb = (union ccb *) arg;
7515 hcb_p np = ccb->ccb_h.sym_hcb_ptr;
7518 * Check that the CAM CCB is still queued.
7520 if (!np)
7521 return;
7523 switch(ccb->ccb_h.func_code) {
7524 case XPT_SCSI_IO:
7525 (void) sym_abort_scsiio(np, ccb, 1);
7526 break;
7527 default:
7528 break;
7532 static void sym_timeout(void *arg)
7534 crit_enter();
7535 sym_timeout1(arg);
7536 crit_exit();
7540 * Abort a SCSI IO.
7542 static int sym_abort_scsiio(hcb_p np, union ccb *ccb, int timed_out)
7544 ccb_p cp;
7545 SYM_QUEHEAD *qp;
7548 * Look up our CCB control block.
7550 cp = 0;
7551 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
7552 ccb_p cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
7553 if (cp2->cam_ccb == ccb) {
7554 cp = cp2;
7555 break;
7558 if (!cp || cp->host_status == HS_WAIT)
7559 return -1;
7562 * If a previous abort didn't succeed in time,
7563 * perform a BUS reset.
7565 if (cp->to_abort) {
7566 sym_reset_scsi_bus(np, 1);
7567 return 0;
7571 * Mark the CCB for abort and allow some time for it to complete.
7573 cp->to_abort = timed_out ? 2 : 1;
7574 callout_reset(&ccb->ccb_h.timeout_ch, 10 * hz, sym_timeout, ccb);
7577 * Tell the SCRIPTS processor to stop and synchronize with us.
7579 np->istat_sem = SEM;
7580 OUTB (nc_istat, SIGP|SEM);
7581 return 0;
7585 * Reset a SCSI device (all LUNs of a target).
7587 static void sym_reset_dev(hcb_p np, union ccb *ccb)
7589 tcb_p tp;
7590 struct ccb_hdr *ccb_h = &ccb->ccb_h;
7592 if (ccb_h->target_id == np->myaddr ||
7593 ccb_h->target_id >= SYM_CONF_MAX_TARGET ||
7594 ccb_h->target_lun >= SYM_CONF_MAX_LUN) {
7595 sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
7596 return;
7599 tp = &np->target[ccb_h->target_id];
7601 tp->to_reset = 1;
7602 sym_xpt_done2(np, ccb, CAM_REQ_CMP);
7604 np->istat_sem = SEM;
7605 OUTB (nc_istat, SIGP|SEM);
7606 return;
7610 * SIM action entry point.
7612 static void sym_action(struct cam_sim *sim, union ccb *ccb)
7614 crit_enter();
7615 sym_action1(sim, ccb);
7616 crit_exit();
7619 static void sym_action1(struct cam_sim *sim, union ccb *ccb)
7621 hcb_p np;
7622 tcb_p tp;
7623 lcb_p lp;
7624 ccb_p cp;
7625 int tmp;
7626 u_char idmsg, *msgptr;
7627 u_int msglen;
7628 struct ccb_scsiio *csio;
7629 struct ccb_hdr *ccb_h;
7631 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("sym_action\n"));
7634 * Retrieve our controller data structure.
7636 np = (hcb_p) cam_sim_softc(sim);
7639 * The common case is SCSI IO.
7640 * We deal with other ones elsewhere.
7642 if (ccb->ccb_h.func_code != XPT_SCSI_IO) {
7643 sym_action2(sim, ccb);
7644 return;
7646 csio = &ccb->csio;
7647 ccb_h = &csio->ccb_h;
7650 * Work around races.
7652 if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
7653 xpt_done(ccb);
7654 return;
7658 * Minimal checks, so that we will not
7659 * go outside our tables.
7661 if (ccb_h->target_id == np->myaddr ||
7662 ccb_h->target_id >= SYM_CONF_MAX_TARGET ||
7663 ccb_h->target_lun >= SYM_CONF_MAX_LUN) {
7664 sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
7665 return;
7669 * Retrieve the target and lun descriptors.
7671 tp = &np->target[ccb_h->target_id];
7672 lp = sym_lp(np, tp, ccb_h->target_lun);
7675 * Complete the 1st INQUIRY command with error
7676 * condition if the device is flagged NOSCAN
7677 * at BOOT in the NVRAM. This may speed up
7678 * the boot and maintain coherency with BIOS
7679 * device numbering. Clearing the flag allows
7680 * user to rescan skipped devices later.
7681 * We also return error for devices not flagged
7682 * for SCAN LUNS in the NVRAM since some mono-lun
7683 * devices behave badly when asked for a non-zero
7684 * LUN. Btw, this is an absolute hack. :-)
7686 if (!(ccb_h->flags & CAM_CDB_PHYS) &&
7687 (0x12 == ((ccb_h->flags & CAM_CDB_POINTER) ?
7688 csio->cdb_io.cdb_ptr[0] : csio->cdb_io.cdb_bytes[0]))) {
7689 if ((tp->usrflags & SYM_SCAN_BOOT_DISABLED) ||
7690 ((tp->usrflags & SYM_SCAN_LUNS_DISABLED) &&
7691 ccb_h->target_lun != 0)) {
7692 tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
7693 sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
7694 return;
7699 * Get a control block for this IO.
7701 tmp = ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0);
7702 cp = sym_get_ccb(np, ccb_h->target_id, ccb_h->target_lun, tmp);
7703 if (!cp) {
7704 sym_xpt_done2(np, ccb, CAM_RESRC_UNAVAIL);
7705 return;
7709 * Keep track of the IO in our CCB.
7711 cp->cam_ccb = ccb;
7714 * Build the IDENTIFY message.
7716 idmsg = M_IDENTIFY | cp->lun;
7717 if (cp->tag != NO_TAG || (lp && (lp->current_flags & SYM_DISC_ENABLED)))
7718 idmsg |= 0x40;
7720 msgptr = cp->scsi_smsg;
7721 msglen = 0;
7722 msgptr[msglen++] = idmsg;
7725 * Build the tag message if present.
7727 if (cp->tag != NO_TAG) {
7728 u_char order = csio->tag_action;
7730 switch(order) {
7731 case M_ORDERED_TAG:
7732 break;
7733 case M_HEAD_TAG:
7734 break;
7735 default:
7736 order = M_SIMPLE_TAG;
7738 msgptr[msglen++] = order;
7741 * For less than 128 tags, actual tags are numbered
7742 * 1,3,5,..,2*MAXTAGS+1, since we may have to deal
7743 * with devices that have problems with #TAG 0 or too
7744 * large #TAG numbers. For more tags (up to 256),
7745 * we use our tag number directly.
7747 #if SYM_CONF_MAX_TASK > (512/4)
7748 msgptr[msglen++] = cp->tag;
7749 #else
7750 msgptr[msglen++] = (cp->tag << 1) + 1;
7751 #endif
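/*
 * Example of the encoding above with the default configuration
 * (SYM_CONF_MAX_TASK <= 128): internal tag 0 goes on the wire as 1,
 * tag 1 as 3, tag 31 as 63, so neither tag #0 nor very large tag
 * values are ever sent to the device.
 */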
7755 * Build a negotiation message if needed.
7756 * (nego_status is filled by sym_prepare_nego())
7758 cp->nego_status = 0;
7759 if (tp->tinfo.current.width != tp->tinfo.goal.width ||
7760 tp->tinfo.current.period != tp->tinfo.goal.period ||
7761 tp->tinfo.current.offset != tp->tinfo.goal.offset ||
7762 tp->tinfo.current.options != tp->tinfo.goal.options) {
7763 if (!tp->nego_cp && lp)
7764 msglen += sym_prepare_nego(np, cp, 0, msgptr + msglen);
7768 * Fill in our ccb
7772 * Startqueue
7774 cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, select));
7775 cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA (np, resel_dsa));
7778 * select
7780 cp->phys.select.sel_id = cp->target;
7781 cp->phys.select.sel_scntl3 = tp->head.wval;
7782 cp->phys.select.sel_sxfer = tp->head.sval;
7783 cp->phys.select.sel_scntl4 = tp->head.uval;
7786 * message
7788 cp->phys.smsg.addr = cpu_to_scr(CCB_BA (cp, scsi_smsg));
7789 cp->phys.smsg.size = cpu_to_scr(msglen);
7792 * command
7794 if (sym_setup_cdb(np, csio, cp) < 0) {
7795 sym_free_ccb(np, cp);
7796 sym_xpt_done(np, ccb);
7797 return;
7801 * status
7803 #if 0 /* Provision */
7804 cp->actualquirks = tp->quirks;
7805 #endif
7806 cp->actualquirks = SYM_QUIRK_AUTOSAVE;
7807 cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
7808 cp->ssss_status = S_ILLEGAL;
7809 cp->xerr_status = 0;
7810 cp->host_flags = 0;
7811 cp->extra_bytes = 0;
7814 * extreme data pointer.
7815 * shall be positive, so -1 is lower than lowest.:)
7817 cp->ext_sg = -1;
7818 cp->ext_ofs = 0;
7821 * Build the data descriptor block
7822 * and start the IO.
7824 sym_setup_data_and_start(np, csio, cp);
7828 * Setup buffers and pointers that address the CDB.
7829 * I bet, physical CDBs will never be used on the planet,
7830 * since they can be bounced without significant overhead.
7832 static int sym_setup_cdb(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
7834 struct ccb_hdr *ccb_h;
7835 u32 cmd_ba;
7836 int cmd_len;
7838 ccb_h = &csio->ccb_h;
7841 * CDB is 16 bytes max.
7843 if (csio->cdb_len > sizeof(cp->cdb_buf)) {
7844 sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
7845 return -1;
7847 cmd_len = csio->cdb_len;
7849 if (ccb_h->flags & CAM_CDB_POINTER) {
7850 /* CDB is a pointer */
7851 if (!(ccb_h->flags & CAM_CDB_PHYS)) {
7852 /* CDB pointer is virtual */
7853 bcopy(csio->cdb_io.cdb_ptr, cp->cdb_buf, cmd_len);
7854 cmd_ba = CCB_BA (cp, cdb_buf[0]);
7855 } else {
7856 /* CDB pointer is physical */
7857 #if 0
7858 cmd_ba = ((u32)csio->cdb_io.cdb_ptr) & 0xffffffff;
7859 #else
7860 sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
7861 return -1;
7862 #endif
7864 } else {
7865 /* CDB is in the CAM ccb (buffer) */
7866 bcopy(csio->cdb_io.cdb_bytes, cp->cdb_buf, cmd_len);
7867 cmd_ba = CCB_BA (cp, cdb_buf[0]);
7870 cp->phys.cmd.addr = cpu_to_scr(cmd_ba);
7871 cp->phys.cmd.size = cpu_to_scr(cmd_len);
7873 return 0;
7877 * Set up data pointers used by SCRIPTS.
7879 static void __inline
7880 sym_setup_data_pointers(hcb_p np, ccb_p cp, int dir)
7882 u32 lastp, goalp;
7885 * No segments means no data.
7887 if (!cp->segments)
7888 dir = CAM_DIR_NONE;
7891 * Set the data pointer.
7893 switch(dir) {
7894 case CAM_DIR_OUT:
7895 goalp = SCRIPTA_BA (np, data_out2) + 8;
7896 lastp = goalp - 8 - (cp->segments * (2*4));
7897 break;
7898 case CAM_DIR_IN:
7899 cp->host_flags |= HF_DATA_IN;
7900 goalp = SCRIPTA_BA (np, data_in2) + 8;
7901 lastp = goalp - 8 - (cp->segments * (2*4));
7902 break;
7903 case CAM_DIR_NONE:
7904 default:
7905 lastp = goalp = SCRIPTB_BA (np, no_data);
7906 break;
7909 cp->phys.head.lastp = cpu_to_scr(lastp);
7910 cp->phys.head.goalp = cpu_to_scr(goalp);
7911 cp->phys.head.savep = cpu_to_scr(lastp);
7912 cp->startp = cp->phys.head.savep;
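/*
 * The arithmetic above assumes each scatter segment is handled by one
 * 8-byte (2*4) table indirect move in the data_in/data_out SCRIPTS:
 * goalp points just past the data_in2/data_out2 termination and lastp
 * backs up by one move per segment, so execution starts at the first
 * move actually needed.  This is also why the scatter routines below
 * fill cp->phys.data[] from the last entry backwards.
 */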
7916 #ifdef FreeBSD_Bus_Dma_Abstraction
7918 * Call back routine for the DMA map service.
7919 * If bounce buffers are used (why ?), we may sleep and then
7920 * be called there in another context.
7922 static void
7923 sym_execute_ccb(void *arg, bus_dma_segment_t *psegs, int nsegs, int error)
7925 ccb_p cp;
7926 hcb_p np;
7927 union ccb *ccb;
7929 crit_enter();
7931 cp = (ccb_p) arg;
7932 ccb = cp->cam_ccb;
7933 np = (hcb_p) cp->arg;
7936 * Deal with weird races.
7938 if (sym_get_cam_status(ccb) != CAM_REQ_INPROG)
7939 goto out_abort;
7942 * Deal with weird errors.
7944 if (error) {
7945 cp->dmamapped = 0;
7946 sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED);
7947 goto out_abort;
7951 * Build the data descriptor for the chip.
7953 if (nsegs) {
7954 int retv;
7955 /* 896 rev 1 requires us to be careful about boundaries */
7956 if (np->device_id == PCI_ID_SYM53C896 && np->revision_id <= 1)
7957 retv = sym_scatter_sg_physical(np, cp, psegs, nsegs);
7958 else
7959 retv = sym_fast_scatter_sg_physical(np,cp, psegs,nsegs);
7960 if (retv < 0) {
7961 sym_set_cam_status(cp->cam_ccb, CAM_REQ_TOO_BIG);
7962 goto out_abort;
7967 * Synchronize the DMA map only if we have
7968 * actually mapped the data.
7970 if (cp->dmamapped) {
7971 bus_dmamap_sync(np->data_dmat, cp->dmamap,
7972 (bus_dmasync_op_t)(cp->dmamapped == SYM_DMA_READ ?
7973 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
7977 * Set host status to busy state.
7978 * May have been set back to HS_WAIT to avoid a race.
7980 cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
7983 * Set data pointers.
7985 sym_setup_data_pointers(np, cp, (ccb->ccb_h.flags & CAM_DIR_MASK));
7988 * Enqueue this IO in our pending queue.
7990 sym_enqueue_cam_ccb(np, ccb);
7993 * When `#ifed 1', the code below makes the driver
7994 * panic on the first attempt to write to a SCSI device.
7995 * It is the first test we want to do after a driver
7996 * change that does not seem obviously safe. :)
7998 #if 0
7999 switch (cp->cdb_buf[0]) {
8000 case 0x0A: case 0x2A: case 0xAA:
8001 panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n");
8002 MDELAY(10000);
8003 break;
8004 default:
8005 break;
8007 #endif
8009 * Activate this job.
8011 sym_put_start_queue(np, cp);
8012 out:
8013 crit_exit();
8014 return;
8015 out_abort:
8016 sym_free_ccb(np, cp);
8017 sym_xpt_done(np, ccb);
8018 goto out;
8022 * How complex it gets to deal with the data in CAM.
8023 * The Bus Dma stuff makes things still more complex.
8025 static void
8026 sym_setup_data_and_start(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
8028 struct ccb_hdr *ccb_h;
8029 int dir, retv;
8031 ccb_h = &csio->ccb_h;
8034 * Now deal with the data.
8036 cp->data_len = csio->dxfer_len;
8037 cp->arg = np;
8040 * No direction means no data.
8042 dir = (ccb_h->flags & CAM_DIR_MASK);
8043 if (dir == CAM_DIR_NONE) {
8044 sym_execute_ccb(cp, NULL, 0, 0);
8045 return;
8048 if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
8049 /* Single buffer */
8050 if (!(ccb_h->flags & CAM_DATA_PHYS)) {
8051 /* Buffer is virtual */
8052 cp->dmamapped = (dir == CAM_DIR_IN) ?
8053 SYM_DMA_READ : SYM_DMA_WRITE;
8054 crit_enter();
8055 retv = bus_dmamap_load(np->data_dmat, cp->dmamap,
8056 csio->data_ptr, csio->dxfer_len,
8057 sym_execute_ccb, cp, 0);
8058 if (retv == EINPROGRESS) {
8059 cp->host_status = HS_WAIT;
8060 xpt_freeze_simq(np->sim, 1);
8061 csio->ccb_h.status |= CAM_RELEASE_SIMQ;
8063 crit_exit();
8064 } else {
8065 /* Buffer is physical */
8066 struct bus_dma_segment seg;
8068 seg.ds_addr = (bus_addr_t) csio->data_ptr;
8069 sym_execute_ccb(cp, &seg, 1, 0);
8071 } else {
8072 /* Scatter/gather list */
8073 struct bus_dma_segment *segs;
8075 if ((ccb_h->flags & CAM_SG_LIST_PHYS) != 0) {
8076 /* The SG list pointer is physical */
8077 sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
8078 goto out_abort;
8081 if (!(ccb_h->flags & CAM_DATA_PHYS)) {
8082 /* SG buffer pointers are virtual */
8083 sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
8084 goto out_abort;
8087 /* SG buffer pointers are physical */
8088 segs = (struct bus_dma_segment *)csio->data_ptr;
8089 sym_execute_ccb(cp, segs, csio->sglist_cnt, 0);
8091 return;
8092 out_abort:
8093 sym_free_ccb(np, cp);
8094 sym_xpt_done(np, (union ccb *) csio);
8098 * Move the scatter list to our data block.
8100 static int
8101 sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp,
8102 bus_dma_segment_t *psegs, int nsegs)
8104 struct sym_tblmove *data;
8105 bus_dma_segment_t *psegs2;
8107 if (nsegs > SYM_CONF_MAX_SG)
8108 return -1;
8110 data = &cp->phys.data[SYM_CONF_MAX_SG-1];
8111 psegs2 = &psegs[nsegs-1];
8112 cp->segments = nsegs;
8114 while (1) {
8115 data->addr = cpu_to_scr(psegs2->ds_addr);
8116 data->size = cpu_to_scr(psegs2->ds_len);
8117 if (DEBUG_FLAGS & DEBUG_SCATTER) {
8118 kprintf ("%s scatter: paddr=%lx len=%ld\n",
8119 sym_name(np), (long) psegs2->ds_addr,
8120 (long) psegs2->ds_len);
8122 if (psegs2 != psegs) {
8123 --data;
8124 --psegs2;
8125 continue;
8127 break;
8129 return 0;
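/*
 * This is the plain 1:1 copy used when the chip has no DMA boundary
 * problem; sym_execute_ccb() only falls back to
 * sym_scatter_sg_physical(), which splits segments on 16MB boundaries,
 * when the chip is an 896 rev 1.
 */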
8132 #else /* FreeBSD_Bus_Dma_Abstraction */
8135 * How complex it gets to deal with the data in CAM.
8136 * Variant without the Bus Dma Abstraction option.
8138 static void
8139 sym_setup_data_and_start(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
8141 struct ccb_hdr *ccb_h;
8142 int dir, retv;
8144 ccb_h = &csio->ccb_h;
8147 * Now deal with the data.
8149 cp->data_len = 0;
8150 cp->segments = 0;
8153 * No direction means no data.
8155 dir = (ccb_h->flags & CAM_DIR_MASK);
8156 if (dir == CAM_DIR_NONE)
8157 goto end_scatter;
8159 if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
8160 /* Single buffer */
8161 if (!(ccb_h->flags & CAM_DATA_PHYS)) {
8162 /* Buffer is virtual */
8163 retv = sym_scatter_virtual(np, cp,
8164 (vm_offset_t) csio->data_ptr,
8165 (vm_size_t) csio->dxfer_len);
8166 } else {
8167 /* Buffer is physical */
8168 retv = sym_scatter_physical(np, cp,
8169 (vm_offset_t) csio->data_ptr,
8170 (vm_size_t) csio->dxfer_len);
8172 } else {
8173 /* Scatter/gather list */
8174 int nsegs;
8175 struct bus_dma_segment *segs;
8176 segs = (struct bus_dma_segment *)csio->data_ptr;
8177 nsegs = csio->sglist_cnt;
8179 if ((ccb_h->flags & CAM_SG_LIST_PHYS) != 0) {
8180 /* The SG list pointer is physical */
8181 sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
8182 goto out_abort;
8184 if (!(ccb_h->flags & CAM_DATA_PHYS)) {
8185 /* SG buffer pointers are virtual */
8186 retv = sym_scatter_sg_virtual(np, cp, segs, nsegs);
8187 } else {
8188 /* SG buffer pointers are physical */
8189 retv = sym_scatter_sg_physical(np, cp, segs, nsegs);
8192 if (retv < 0) {
8193 sym_set_cam_status(cp->cam_ccb, CAM_REQ_TOO_BIG);
8194 goto out_abort;
8197 end_scatter:
8199 * Set data pointers.
8201 sym_setup_data_pointers(np, cp, dir);
8204 * Enqueue this IO in our pending queue.
8206 sym_enqueue_cam_ccb(np, (union ccb *) csio);
8209 * Activate this job.
8211 sym_put_start_queue(np, cp);
8214 * Command is successfully queued.
8216 return;
8217 out_abort:
8218 sym_free_ccb(np, cp);
8219 sym_xpt_done(np, (union ccb *) csio);
8223 * Scatter a virtual buffer into bus addressable chunks.
8225 static int
8226 sym_scatter_virtual(hcb_p np, ccb_p cp, vm_offset_t vaddr, vm_size_t len)
8228 u_long pe, pn;
8229 u_long n, k;
8230 int s;
8232 cp->data_len += len;
8234 pe = vaddr + len;
8235 n = len;
8236 s = SYM_CONF_MAX_SG - 1 - cp->segments;
8238 while (n && s >= 0) {
8239 pn = (pe - 1) & ~PAGE_MASK;
8240 k = pe - pn;
8241 if (k > n) {
8242 k = n;
8243 pn = pe - n;
8245 if (DEBUG_FLAGS & DEBUG_SCATTER) {
8246 kprintf ("%s scatter: va=%lx pa=%lx siz=%ld\n",
8247 sym_name(np), pn, (u_long) vtobus(pn), k);
8249 cp->phys.data[s].addr = cpu_to_scr(vtobus(pn));
8250 cp->phys.data[s].size = cpu_to_scr(k);
8251 pe = pn;
8252 n -= k;
8253 --s;
8255 cp->segments = SYM_CONF_MAX_SG - 1 - s;
8257 return n ? -1 : 0;
8261 * Scatter a SG list with virtual addresses into bus addressable chunks.
8263 static int
8264 sym_scatter_sg_virtual(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs)
8266 int i, retv = 0;
8268 for (i = nsegs - 1 ; i >= 0 ; --i) {
8269 retv = sym_scatter_virtual(np, cp,
8270 psegs[i].ds_addr, psegs[i].ds_len);
8271 if (retv < 0)
8272 break;
8274 return retv;
8278 * Scatter a physical buffer into bus addressable chunks.
8280 static int
8281 sym_scatter_physical(hcb_p np, ccb_p cp, vm_offset_t paddr, vm_size_t len)
8283 struct bus_dma_segment seg;
8285 seg.ds_addr = paddr;
8286 seg.ds_len = len;
8287 return sym_scatter_sg_physical(np, cp, &seg, 1);
8290 #endif /* FreeBSD_Bus_Dma_Abstraction */
8293 * Scatter a SG list with physical addresses into bus addressable chunks.
8294 * We need to ensure that 16MB boundaries are not crossed during DMA
8295 * of each segment, since some chips are flawed.
8297 #define BOUND_MASK ((1UL<<24)-1)
8298 static int
8299 sym_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs)
8301 u_long ps, pe, pn;
8302 u_long k;
8303 int s, t;
8305 #ifndef FreeBSD_Bus_Dma_Abstraction
8306 s = SYM_CONF_MAX_SG - 1 - cp->segments;
8307 #else
8308 s = SYM_CONF_MAX_SG - 1;
8309 #endif
8310 t = nsegs - 1;
8311 ps = psegs[t].ds_addr;
8312 pe = ps + psegs[t].ds_len;
8314 while (s >= 0) {
8315 pn = (pe - 1) & ~BOUND_MASK;
8316 if (pn <= ps)
8317 pn = ps;
8318 k = pe - pn;
8319 if (DEBUG_FLAGS & DEBUG_SCATTER) {
8320 kprintf ("%s scatter: paddr=%lx len=%ld\n",
8321 sym_name(np), pn, k);
8323 cp->phys.data[s].addr = cpu_to_scr(pn);
8324 cp->phys.data[s].size = cpu_to_scr(k);
8325 #ifndef FreeBSD_Bus_Dma_Abstraction
8326 cp->data_len += k;
8327 #endif
8328 --s;
8329 if (pn == ps) {
8330 if (--t < 0)
8331 break;
8332 ps = psegs[t].ds_addr;
8333 pe = ps + psegs[t].ds_len;
8335 else
8336 pe = pn;
8339 cp->segments = SYM_CONF_MAX_SG - 1 - s;
8341 return t >= 0 ? -1 : 0;
8343 #undef BOUND_MASK
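/*
 * Example of the splitting above: with BOUND_MASK = 0x00ffffff, a 2MB
 * segment at physical address 0x00f80000 ends at 0x01180000 and thus
 * crosses the 16MB line; it is split into 0x00f80000/0x00080000 and
 * 0x01000000/0x00180000 (stored from the tail of the list backwards),
 * so no single entry crosses the boundary.
 */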
8346 * SIM action for non performance critical stuff.
8348 static void sym_action2(struct cam_sim *sim, union ccb *ccb)
8350 hcb_p np;
8351 tcb_p tp;
8352 lcb_p lp;
8353 struct ccb_hdr *ccb_h;
8356 * Retrieve our controller data structure.
8358 np = (hcb_p) cam_sim_softc(sim);
8360 ccb_h = &ccb->ccb_h;
8362 switch (ccb_h->func_code) {
8363 case XPT_SET_TRAN_SETTINGS:
8365 struct ccb_trans_settings *cts;
8367 cts = &ccb->cts;
8368 tp = &np->target[ccb_h->target_id];
8371 * Update SPI transport settings in TARGET control block.
8372 * Update SCSI device settings in LUN control block.
8374 lp = sym_lp(np, tp, ccb_h->target_lun);
8375 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
8376 sym_update_trans(np, tp, &tp->tinfo.goal, cts);
8377 if (lp)
8378 sym_update_dflags(np, &lp->current_flags, cts);
8380 if (cts->type == CTS_TYPE_USER_SETTINGS) {
8381 sym_update_trans(np, tp, &tp->tinfo.user, cts);
8382 if (lp)
8383 sym_update_dflags(np, &lp->user_flags, cts);
8386 sym_xpt_done2(np, ccb, CAM_REQ_CMP);
8387 break;
8389 case XPT_GET_TRAN_SETTINGS:
8391 struct ccb_trans_settings *cts;
8392 struct sym_trans *tip;
8393 u_char dflags;
8395 cts = &ccb->cts;
8396 tp = &np->target[ccb_h->target_id];
8397 lp = sym_lp(np, tp, ccb_h->target_lun);
8399 #define cts__scsi (&cts->proto_specific.scsi)
8400 #define cts__spi (&cts->xport_specific.spi)
8401 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
8402 tip = &tp->tinfo.current;
8403 dflags = lp ? lp->current_flags : 0;
8405 else {
8406 tip = &tp->tinfo.user;
8407 dflags = lp ? lp->user_flags : tp->usrflags;
8410 cts->protocol = PROTO_SCSI;
8411 cts->transport = XPORT_SPI;
8412 cts->protocol_version = tip->scsi_version;
8413 cts->transport_version = tip->spi_version;
8415 cts__spi->sync_period = tip->period;
8416 cts__spi->sync_offset = tip->offset;
8417 cts__spi->bus_width = tip->width;
8418 cts__spi->ppr_options = tip->options;
8420 cts__spi->valid = CTS_SPI_VALID_SYNC_RATE
8421 | CTS_SPI_VALID_SYNC_OFFSET
8422 | CTS_SPI_VALID_BUS_WIDTH
8423 | CTS_SPI_VALID_PPR_OPTIONS;
8425 cts__spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
8426 if (dflags & SYM_DISC_ENABLED)
8427 cts__spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
8428 cts__spi->valid |= CTS_SPI_VALID_DISC;
8430 cts__scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
8431 if (dflags & SYM_TAGS_ENABLED)
8432 cts__scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
8433 cts__scsi->valid |= CTS_SCSI_VALID_TQ;
8434 #undef cts__spi
8435 #undef cts__scsi
8436 sym_xpt_done2(np, ccb, CAM_REQ_CMP);
8437 break;
8439 case XPT_CALC_GEOMETRY:
8441 struct ccb_calc_geometry *ccg;
8442 u32 size_mb;
8443 u32 secs_per_cylinder;
8444 int extended;
8447 * Silly DOS geometry.
8449 ccg = &ccb->ccg;
8450 size_mb = ccg->volume_size
8451 / ((1024L * 1024L) / ccg->block_size);
8452 extended = 1;
8454 if (size_mb > 1024 && extended) {
8455 ccg->heads = 255;
8456 ccg->secs_per_track = 63;
8457 } else {
8458 ccg->heads = 64;
8459 ccg->secs_per_track = 32;
8461 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
8462 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
8463 sym_xpt_done2(np, ccb, CAM_REQ_CMP);
8464 break;
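/*
 * Example of the translation above: a 4GB disk with 512-byte blocks
 * has volume_size = 8388608, i.e. size_mb = 4096 > 1024, so the
 * extended 255 heads / 63 sectors geometry is used and
 * cylinders = 8388608 / (255 * 63) = 522.
 */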
8466 case XPT_PATH_INQ:
8468 struct ccb_pathinq *cpi = &ccb->cpi;
8469 cpi->version_num = 1;
8470 cpi->hba_inquiry = PI_MDP_ABLE|PI_SDTR_ABLE|PI_TAG_ABLE;
8471 if ((np->features & FE_WIDE) != 0)
8472 cpi->hba_inquiry |= PI_WIDE_16;
8473 cpi->target_sprt = 0;
8474 cpi->hba_misc = 0;
8475 if (np->usrflags & SYM_SCAN_TARGETS_HILO)
8476 cpi->hba_misc |= PIM_SCANHILO;
8477 if (np->usrflags & SYM_AVOID_BUS_RESET)
8478 cpi->hba_misc |= PIM_NOBUSRESET;
8479 cpi->hba_eng_cnt = 0;
8480 cpi->max_target = (np->features & FE_WIDE) ? 15 : 7;
8481 /* Semantic problem :) LUN number max = max number of LUNs - 1 */
8482 cpi->max_lun = SYM_CONF_MAX_LUN-1;
8483 if (SYM_SETUP_MAX_LUN < SYM_CONF_MAX_LUN)
8484 cpi->max_lun = SYM_SETUP_MAX_LUN-1;
8485 cpi->bus_id = cam_sim_bus(sim);
8486 cpi->initiator_id = np->myaddr;
8487 cpi->base_transfer_speed = 3300;
8488 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
8489 strncpy(cpi->hba_vid, "Symbios", HBA_IDLEN);
8490 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
8491 cpi->unit_number = cam_sim_unit(sim);
8493 cpi->protocol = PROTO_SCSI;
8494 cpi->protocol_version = SCSI_REV_2;
8495 cpi->transport = XPORT_SPI;
8496 cpi->transport_version = 2;
8497 cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
8498 if (np->features & FE_ULTRA3) {
8499 cpi->transport_version = 3;
8500 cpi->xport_specific.spi.ppr_options =
8501 SID_SPI_CLOCK_DT_ST;
8503 sym_xpt_done2(np, ccb, CAM_REQ_CMP);
8504 break;
8506 case XPT_ABORT:
8508 union ccb *abort_ccb = ccb->cab.abort_ccb;
8509 switch(abort_ccb->ccb_h.func_code) {
8510 case XPT_SCSI_IO:
8511 if (sym_abort_scsiio(np, abort_ccb, 0) == 0) {
8512 sym_xpt_done2(np, ccb, CAM_REQ_CMP);
8513 break;
8515 default:
8516 sym_xpt_done2(np, ccb, CAM_UA_ABORT);
8517 break;
8519 break;
8521 case XPT_RESET_DEV:
8523 sym_reset_dev(np, ccb);
8524 break;
8526 case XPT_RESET_BUS:
8528 sym_reset_scsi_bus(np, 0);
8529 if (sym_verbose) {
8530 xpt_print_path(np->path);
8531 kprintf("SCSI BUS reset delivered.\n");
8533 sym_init (np, 1);
8534 sym_xpt_done2(np, ccb, CAM_REQ_CMP);
8535 break;
8537 case XPT_ACCEPT_TARGET_IO:
8538 case XPT_CONT_TARGET_IO:
8539 case XPT_EN_LUN:
8540 case XPT_NOTIFY_ACK:
8541 case XPT_IMMED_NOTIFY:
8542 case XPT_TERM_IO:
8543 default:
8544 sym_xpt_done2(np, ccb, CAM_REQ_INVALID);
8545 break;
8550 * Asynchronous notification handler.
8552 static void
8553 sym_async(void *cb_arg, u32 code, struct cam_path *path, void *arg)
8555 hcb_p np;
8556 struct cam_sim *sim;
8557 u_int tn;
8558 tcb_p tp;
8560 crit_enter();
8562 sim = (struct cam_sim *) cb_arg;
8563 np = (hcb_p) cam_sim_softc(sim);
8565 switch (code) {
8566 case AC_LOST_DEVICE:
8567 tn = xpt_path_target_id(path);
8568 if (tn >= SYM_CONF_MAX_TARGET)
8569 break;
8571 tp = &np->target[tn];
8573 tp->to_reset = 0;
8574 tp->head.sval = 0;
8575 tp->head.wval = np->rv_scntl3;
8576 tp->head.uval = 0;
8578 tp->tinfo.current.period = tp->tinfo.goal.period = 0;
8579 tp->tinfo.current.offset = tp->tinfo.goal.offset = 0;
8580 tp->tinfo.current.width = tp->tinfo.goal.width = BUS_8_BIT;
8581 tp->tinfo.current.options = tp->tinfo.goal.options = 0;
8583 break;
8584 default:
8585 break;
8588 crit_exit();
8592 * Update transfer settings of a target.
8594 static void sym_update_trans(hcb_p np, tcb_p tp, struct sym_trans *tip,
8595 struct ccb_trans_settings *cts)
8598 * Update the info.
8600 #define cts__spi (&cts->xport_specific.spi)
8601 if ((cts__spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
8602 tip->width = cts__spi->bus_width;
8603 if ((cts__spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)
8604 tip->offset = cts__spi->sync_offset;
8605 if ((cts__spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
8606 tip->period = cts__spi->sync_period;
8607 if ((cts__spi->valid & CTS_SPI_VALID_PPR_OPTIONS) != 0)
8608 tip->options = (cts__spi->ppr_options & PPR_OPT_DT);
8609 if (cts->protocol_version != PROTO_VERSION_UNSPECIFIED &&
8610 cts->protocol_version != PROTO_VERSION_UNKNOWN)
8611 tip->scsi_version = cts->protocol_version;
8612 if (cts->transport_version != XPORT_VERSION_UNSPECIFIED &&
8613 cts->transport_version != XPORT_VERSION_UNKNOWN)
8614 tip->spi_version = cts->transport_version;
8615 #undef cts__spi
8617 * Scale against driver configuration limits.
8619 if (tip->width > SYM_SETUP_MAX_WIDE) tip->width = SYM_SETUP_MAX_WIDE;
8620 if (tip->offset > SYM_SETUP_MAX_OFFS) tip->offset = SYM_SETUP_MAX_OFFS;
8621 if (tip->period < SYM_SETUP_MIN_SYNC) tip->period = SYM_SETUP_MIN_SYNC;
8624 * Scale against actual controller BUS width.
8626 if (tip->width > np->maxwide)
8627 tip->width = np->maxwide;
8630 * Only accept DT if the controller supports it and SYNC/WIDE is asked for.
8632 if (!((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) ||
8633 !(tip->width == BUS_16_BIT && tip->offset)) {
8634 tip->options &= ~PPR_OPT_DT;
8638 * Scale period factor and offset against controller limits.
8640 if (tip->options & PPR_OPT_DT) {
8641 if (tip->period < np->minsync_dt)
8642 tip->period = np->minsync_dt;
8643 if (tip->period > np->maxsync_dt)
8644 tip->period = np->maxsync_dt;
8645 if (tip->offset > np->maxoffs_dt)
8646 tip->offset = np->maxoffs_dt;
8648 else {
8649 if (tip->period < np->minsync)
8650 tip->period = np->minsync;
8651 if (tip->period > np->maxsync)
8652 tip->period = np->maxsync;
8653 if (tip->offset > np->maxoffs)
8654 tip->offset = np->maxoffs;
8659 * Update flags for a device (logical unit).
8661 static void
8662 sym_update_dflags(hcb_p np, u_char *flags, struct ccb_trans_settings *cts)
8664 #define cts__scsi (&cts->proto_specific.scsi)
8665 #define cts__spi (&cts->xport_specific.spi)
8666 if ((cts__spi->valid & CTS_SPI_VALID_DISC) != 0) {
8667 if ((cts__spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
8668 *flags |= SYM_DISC_ENABLED;
8669 else
8670 *flags &= ~SYM_DISC_ENABLED;
8673 if ((cts__scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
8674 if ((cts__scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
8675 *flags |= SYM_TAGS_ENABLED;
8676 else
8677 *flags &= ~SYM_TAGS_ENABLED;
8679 #undef cts__spi
8680 #undef cts__scsi
8684 /*============= DRIVER INITIALISATION ==================*/
8686 #ifdef FreeBSD_Bus_Io_Abstraction
8688 static device_method_t sym_pci_methods[] = {
8689 DEVMETHOD(device_probe, sym_pci_probe),
8690 DEVMETHOD(device_attach, sym_pci_attach),
8691 { 0, 0 }
8694 static driver_t sym_pci_driver = {
8695 "sym",
8696 sym_pci_methods,
8697 sizeof(struct sym_hcb)
8700 static devclass_t sym_devclass;
8702 DRIVER_MODULE(sym, pci, sym_pci_driver, sym_devclass, 0, 0);
8704 #else /* Pre-FreeBSD_Bus_Io_Abstraction */
8706 static u_long sym_unit;
8708 static struct pci_device sym_pci_driver = {
8709 "sym",
8710 sym_pci_probe,
8711 sym_pci_attach,
8712 &sym_unit,
8713 NULL
8716 #if defined(__DragonFly__) || __FreeBSD_version >= 400000
8717 COMPAT_PCI_DRIVER (sym, sym_pci_driver);
8718 #else
8719 DATA_SET (pcidevice_set, sym_pci_driver);
8720 #endif
8722 #endif /* FreeBSD_Bus_Io_Abstraction */
8724 static struct sym_pci_chip sym_pci_dev_table[] = {
8725 {PCI_ID_SYM53C810, 0x0f, "810", 4, 8, 4, 64,
8726 FE_ERL}
8728 #ifdef SYM_DEBUG_GENERIC_SUPPORT
8729 {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1,
8730 FE_BOF}
8732 #else
8733 {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1,
8734 FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF}
8736 #endif
8737 {PCI_ID_SYM53C815, 0xff, "815", 4, 8, 4, 64,
8738 FE_BOF|FE_ERL}
8740 {PCI_ID_SYM53C825, 0x0f, "825", 6, 8, 4, 64,
8741 FE_WIDE|FE_BOF|FE_ERL|FE_DIFF}
8743 {PCI_ID_SYM53C825, 0xff, "825a", 6, 8, 4, 2,
8744 FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF}
8746 {PCI_ID_SYM53C860, 0xff, "860", 4, 8, 5, 1,
8747 FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN}
8749 {PCI_ID_SYM53C875, 0x01, "875", 6, 16, 5, 2,
8750 FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
8751 FE_RAM|FE_DIFF}
8753 {PCI_ID_SYM53C875, 0xff, "875", 6, 16, 5, 2,
8754 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
8755 FE_RAM|FE_DIFF}
8757 {PCI_ID_SYM53C875_2, 0xff, "875", 6, 16, 5, 2,
8758 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
8759 FE_RAM|FE_DIFF}
8761 {PCI_ID_SYM53C885, 0xff, "885", 6, 16, 5, 2,
8762 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
8763 FE_RAM|FE_DIFF}
8765 #ifdef SYM_DEBUG_GENERIC_SUPPORT
8766 {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2,
8767 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|
8768 FE_RAM|FE_LCKFRQ}
8770 #else
8771 {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2,
8772 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
8773 FE_RAM|FE_LCKFRQ}
8775 #endif
8776 {PCI_ID_SYM53C896, 0xff, "896", 6, 31, 7, 4,
8777 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
8778 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
8780 {PCI_ID_SYM53C895A, 0xff, "895a", 6, 31, 7, 4,
8781 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
8782 FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
8784 {PCI_ID_LSI53C1010, 0x00, "1010-33", 6, 31, 7, 8,
8785 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
8786 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
8787 FE_C10}
8789 {PCI_ID_LSI53C1010, 0xff, "1010-33", 6, 31, 7, 8,
8790 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
8791 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
8792 FE_C10|FE_U3EN}
8794 {PCI_ID_LSI53C1010_2, 0xff, "1010-66", 6, 31, 7, 8,
8795 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
8796 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC|
8797 FE_C10|FE_U3EN}
8799 {PCI_ID_LSI53C1510D, 0xff, "1510d", 6, 31, 7, 4,
8800 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
8801 FE_RAM|FE_IO256|FE_LEDC}
8804 #define sym_pci_num_devs \
8805 (sizeof(sym_pci_dev_table) / sizeof(sym_pci_dev_table[0]))
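/*
 * Note on the table above: sym_find_pci_chip() matches on device_id and
 * treats revision_id as an inclusive upper bound (the first entry whose
 * revision_id is >= the chip's PCI revision wins), so entries that share a
 * device_id must stay ordered by increasing revision_id.
 */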
8808 * Look up the chip table.
8810 * Return a pointer to the chip entry if found,
8811 * zero otherwise.
8813 static struct sym_pci_chip *
8814 #ifdef FreeBSD_Bus_Io_Abstraction
8815 sym_find_pci_chip(device_t dev)
8816 #else
8817 sym_find_pci_chip(pcici_t pci_tag)
8818 #endif
8820 struct sym_pci_chip *chip;
8821 int i;
8822 u_short device_id;
8823 u_char revision;
8825 #ifdef FreeBSD_Bus_Io_Abstraction
8826 if (pci_get_vendor(dev) != PCI_VENDOR_NCR)
8827 return 0;
8829 device_id = pci_get_device(dev);
8830 revision = pci_get_revid(dev);
8831 #else
8832 if (pci_cfgread(pci_tag, PCIR_VENDOR, 2) != PCI_VENDOR_NCR)
8833 return 0;
8835 device_id = pci_cfgread(pci_tag, PCIR_DEVICE, 2);
8836 revision = pci_cfgread(pci_tag, PCIR_REVID, 1);
8837 #endif
8839 for (i = 0; i < sym_pci_num_devs; i++) {
8840 chip = &sym_pci_dev_table[i];
8841 if (device_id != chip->device_id)
8842 continue;
8843 if (revision > chip->revision_id)
8844 continue;
8845 return chip;
8848 return 0;
8852 * Tell upper layer if the chip is supported.
8854 #ifdef FreeBSD_Bus_Io_Abstraction
8855 static int
8856 sym_pci_probe(device_t dev)
8858 struct sym_pci_chip *chip;
8860 chip = sym_find_pci_chip(dev);
8861 if (chip && sym_find_firmware(chip)) {
8862 device_set_desc(dev, chip->name);
8863 return (chip->lp_probe_bit & SYM_SETUP_LP_PROBE_MAP)? -2000 : 0;
8865 return ENXIO;
8867 #else /* Pre-FreeBSD_Bus_Io_Abstraction */
8868 static const char *
8869 sym_pci_probe(pcici_t pci_tag, pcidi_t type)
8871 struct sym_pci_chip *chip;
8873 chip = sym_find_pci_chip(pci_tag);
8874 if (chip && sym_find_firmware(chip)) {
8875 #if NNCR > 0
8876 /* Only claim chips for which we are allowed to take precedence over the ncr driver */
8877 if (!(chip->lp_probe_bit & SYM_SETUP_LP_PROBE_MAP))
8878 #else
8879 if (1)
8880 #endif
8881 return chip->name;
8883 return 0;
8885 #endif
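/*
 * Under the newbus probe convention, returning 0 claims the device at the
 * default priority while a negative value (the -2000 above) marks a
 * low-priority match, which is what lets another driver (e.g. the older
 * ncr(4)) take the chips selected by SYM_SETUP_LP_PROBE_MAP.
 */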
8888 * Attach a sym53c8xx device.
8890 #ifdef FreeBSD_Bus_Io_Abstraction
8891 static int
8892 sym_pci_attach(device_t dev)
8893 #else
8894 static void
8895 sym_pci_attach(pcici_t pci_tag, int unit)
8897 int err = sym_pci_attach2(pci_tag, unit);
8898 if (err)
8899 kprintf("sym: failed to attach unit %d - err=%d.\n", unit, err);
8901 static int
8902 sym_pci_attach2(pcici_t pci_tag, int unit)
8903 #endif
8905 struct sym_pci_chip *chip;
8906 u_short command;
8907 u_char cachelnsz;
8908 struct sym_hcb *np = 0;
8909 struct sym_nvram nvram;
8910 struct sym_fw *fw = 0;
8911 int i;
8912 #ifdef FreeBSD_Bus_Dma_Abstraction
8913 bus_dma_tag_t bus_dmat;
8916 * I expected to be told about a parent
8917 * DMA tag, but didn't find any.
8919 bus_dmat = NULL;
8920 #endif
8923 * Only probed devices should be attached.
8924 * We just enjoy being paranoid. :)
8926 #ifdef FreeBSD_Bus_Io_Abstraction
8927 chip = sym_find_pci_chip(dev);
8928 #else
8929 chip = sym_find_pci_chip(pci_tag);
8930 #endif
8931 if (chip == NULL || (fw = sym_find_firmware(chip)) == NULL)
8932 return (ENXIO);
8935 * Allocate the host control block right away,
8936 * since we are only expecting to succeed. :)
8937 * We keep track in the HCB of all the resources that
8938 * are to be released on error.
8940 #ifdef FreeBSD_Bus_Dma_Abstraction
8941 np = __sym_calloc_dma(bus_dmat, sizeof(*np), "HCB");
8942 if (np)
8943 np->bus_dmat = bus_dmat;
8944 else
8945 goto attach_failed;
8946 #else
8947 np = sym_calloc_dma(sizeof(*np), "HCB");
8948 if (!np)
8949 goto attach_failed;
8950 #endif
8953 * Copy some useful information to the HCB.
8955 np->hcb_ba = vtobus(np);
8956 np->verbose = bootverbose;
8957 #ifdef FreeBSD_Bus_Io_Abstraction
8958 np->device = dev;
8959 np->unit = device_get_unit(dev);
8960 np->device_id = pci_get_device(dev);
8961 np->revision_id = pci_get_revid(dev);
8962 #else
8963 np->pci_tag = pci_tag;
8964 np->unit = unit;
8965 np->device_id = pci_cfgread(pci_tag, PCIR_DEVICE, 2);
8966 np->revision_id = pci_cfgread(pci_tag, PCIR_REVID, 1);
8967 #endif
8968 np->features = chip->features;
8969 np->clock_divn = chip->nr_divisor;
8970 np->maxoffs = chip->offset_max;
8971 np->maxburst = chip->burst_max;
8972 np->scripta_sz = fw->a_size;
8973 np->scriptb_sz = fw->b_size;
8974 np->fw_setup = fw->setup;
8975 np->fw_patch = fw->patch;
8976 np->fw_name = fw->name;
8979 * Set the instance name.
8981 ksnprintf(np->inst_name, sizeof(np->inst_name), "sym%d", np->unit);
8984 * Initialize the CCB free and busy queues.
8986 sym_que_init(&np->free_ccbq);
8987 sym_que_init(&np->busy_ccbq);
8988 sym_que_init(&np->comp_ccbq);
8989 sym_que_init(&np->cam_ccbq);
8992 * Allocate a tag for the DMA of user data.
8994 #ifdef FreeBSD_Bus_Dma_Abstraction
8995 if (bus_dma_tag_create(np->bus_dmat, 1, (1<<24),
8996 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
8997 NULL, NULL,
8998 BUS_SPACE_MAXSIZE, SYM_CONF_MAX_SG,
8999 (1<<24), 0, &np->data_dmat)) {
9000 device_printf(dev, "failed to create DMA tag.\n");
9001 goto attach_failed;
9003 #endif
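/*
 * With the argument order of bus_dma_tag_create(), the call above asks for
 * byte alignment, a 16MB (1<<24) boundary that no segment may cross, up to
 * SYM_CONF_MAX_SG segments of at most 16MB each, anywhere in the bus
 * address space.
 */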
9005 * Read and apply some fix-ups to the PCI COMMAND
9006 * register. We want the chip to be enabled for:
9007 * - BUS mastering
9008 * - PCI parity checking (reporting would also be fine)
9009 * - Write And Invalidate.
9011 #ifdef FreeBSD_Bus_Io_Abstraction
9012 command = pci_read_config(dev, PCIR_COMMAND, 2);
9013 #else
9014 command = pci_cfgread(pci_tag, PCIR_COMMAND, 2);
9015 #endif
9016 command |= PCIM_CMD_BUSMASTEREN;
9017 command |= PCIM_CMD_PERRESPEN;
9018 command |= /* PCIM_CMD_MWIEN */ 0x0010;
9019 #ifdef FreeBSD_Bus_Io_Abstraction
9020 pci_write_config(dev, PCIR_COMMAND, command, 2);
9021 #else
9022 pci_cfgwrite(pci_tag, PCIR_COMMAND, command, 2);
9023 #endif
9026 * Tell the device about the cache line size,
9027 * if it does not know it yet.
9029 #ifdef FreeBSD_Bus_Io_Abstraction
9030 cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
9031 #else
9032 cachelnsz = pci_cfgread(pci_tag, PCIR_CACHELNSZ, 1);
9033 #endif
9034 if (!cachelnsz) {
9035 cachelnsz = 8;
9036 #ifdef FreeBSD_Bus_Io_Abstraction
9037 pci_write_config(dev, PCIR_CACHELNSZ, cachelnsz, 1);
9038 #else
9039 pci_cfgwrite(pci_tag, PCIR_CACHELNSZ, cachelnsz, 1);
9040 #endif
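/*
 * PCIR_CACHELNSZ is expressed in 32-bit words, so the default of 8 written
 * above corresponds to a 32-byte cache line.
 */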
9044 * Alloc/get/map/retrieve everything that deals with MMIO.
9046 #ifdef FreeBSD_Bus_Io_Abstraction
9047 if ((command & PCIM_CMD_MEMEN) != 0) {
9048 int regs_id = SYM_PCI_MMIO;
9049 np->mmio_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &regs_id,
9050 0, ~0, 1, RF_ACTIVE);
9052 if (!np->mmio_res) {
9053 device_printf(dev, "failed to allocate MMIO resources\n");
9054 goto attach_failed;
9056 np->mmio_bsh = rman_get_bushandle(np->mmio_res);
9057 np->mmio_tag = rman_get_bustag(np->mmio_res);
9058 np->mmio_pa = rman_get_start(np->mmio_res);
9059 np->mmio_va = (vm_offset_t) rman_get_virtual(np->mmio_res);
9060 np->mmio_ba = np->mmio_pa;
9061 #else
9062 if ((command & PCIM_CMD_MEMEN) != 0) {
9063 vm_offset_t vaddr, paddr;
9064 if (!pci_map_mem(pci_tag, SYM_PCI_MMIO, &vaddr, &paddr)) {
9065 kprintf("%s: failed to map MMIO window\n", sym_name(np));
9066 goto attach_failed;
9068 np->mmio_va = vaddr;
9069 np->mmio_pa = paddr;
9070 np->mmio_ba = paddr;
9072 #endif
9075 * Allocate the IRQ.
9077 #ifdef FreeBSD_Bus_Io_Abstraction
9078 i = 0;
9079 np->irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &i,
9080 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
9081 if (!np->irq_res) {
9082 device_printf(dev, "failed to allocate IRQ resource\n");
9083 goto attach_failed;
9085 #endif
9087 #ifdef SYM_CONF_IOMAPPED
9089 * The user wants us to use normal IO with PCI.
9090 * Alloc/get/map/retrieve everything that deals with IO.
9092 #ifdef FreeBSD_Bus_Io_Abstraction
9093 if ((command & PCI_COMMAND_IO_ENABLE) != 0) {
9094 int regs_id = SYM_PCI_IO;
9095 np->io_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &regs_id,
9096 0, ~0, 1, RF_ACTIVE);
9098 if (!np->io_res) {
9099 device_printf(dev, "failed to allocate IO resources\n");
9100 goto attach_failed;
9102 np->io_bsh = rman_get_bushandle(np->io_res);
9103 np->io_tag = rman_get_bustag(np->io_res);
9104 np->io_port = rman_get_start(np->io_res);
9105 #else
9106 if ((command & PCI_COMMAND_IO_ENABLE) != 0) {
9107 pci_port_t io_port;
9108 if (!pci_map_port (pci_tag, SYM_PCI_IO, &io_port)) {
9109 kprintf("%s: failed to map IO window\n", sym_name(np));
9110 goto attach_failed;
9112 np->io_port = io_port;
9114 #endif
9116 #endif /* SYM_CONF_IOMAPPED */
9119 * If the chip has on-board RAM,
9120 * alloc/get/map/retrieve the corresponding resources.
9122 if ((np->features & (FE_RAM|FE_RAM8K)) &&
9123 (command & PCIM_CMD_MEMEN) != 0) {
9124 #ifdef FreeBSD_Bus_Io_Abstraction
9125 int regs_id = SYM_PCI_RAM;
9126 if (np->features & FE_64BIT)
9127 regs_id = SYM_PCI_RAM64;
9128 np->ram_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &regs_id,
9129 0, ~0, 1, RF_ACTIVE);
9130 if (!np->ram_res) {
9131 device_printf(dev,"failed to allocate RAM resources\n");
9132 goto attach_failed;
9134 np->ram_id = regs_id;
9135 np->ram_bsh = rman_get_bushandle(np->ram_res);
9136 np->ram_tag = rman_get_bustag(np->ram_res);
9137 np->ram_pa = rman_get_start(np->ram_res);
9138 np->ram_va = (vm_offset_t) rman_get_virtual(np->ram_res);
9139 np->ram_ba = np->ram_pa;
9140 #else
9141 vm_offset_t vaddr, paddr;
9142 int regs_id = SYM_PCI_RAM;
9143 if (np->features & FE_64BIT)
9144 regs_id = SYM_PCI_RAM64;
9145 if (!pci_map_mem(pci_tag, regs_id, &vaddr, &paddr)) {
9146 kprintf("%s: failed to map RAM window\n", sym_name(np));
9147 goto attach_failed;
9149 np->ram_va = vaddr;
9150 np->ram_pa = paddr;
9151 np->ram_ba = paddr;
9152 #endif
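/*
 * SYM_PCI_RAM64 is used for FE_64BIT chips because their MMIO BAR is a
 * 64-bit BAR and occupies two configuration dwords, which pushes the
 * on-chip RAM BAR one register further up.
 */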
9156 * Save the initial settings of some IO registers, so we will
9157 * be able to probe specific implementations.
9159 sym_save_initial_setting (np);
9162 * Reset the chip now, since it has been reported
9163 * that SCSI clock calibration may not work properly
9164 * if the chip is currently active.
9166 sym_chip_reset (np);
9169 * Try to read the user set-up.
9171 (void) sym_read_nvram(np, &nvram);
9174 * Prepare controller and device settings, according
9175 * to chip features, user set-up and driver set-up.
9177 (void) sym_prepare_setting(np, &nvram);
9180 * Check the PCI clock frequency.
9181 * Must be performed after prepare_setting since it destroys
9182 * STEST1, which is used to probe for the clock doubler.
9184 i = sym_getpciclock(np);
9185 if (i > 37000)
9186 #ifdef FreeBSD_Bus_Io_Abstraction
9187 device_printf(dev, "PCI BUS clock seems too high: %u KHz.\n",i);
9188 #else
9189 kprintf("%s: PCI BUS clock seems too high: %u KHz.\n",
9190 sym_name(np), i);
9191 #endif
9194 * Allocate the start queue.
9196 np->squeue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE");
9197 if (!np->squeue)
9198 goto attach_failed;
9199 np->squeue_ba = vtobus(np->squeue);
9202 * Allocate the done queue.
9204 np->dqueue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE");
9205 if (!np->dqueue)
9206 goto attach_failed;
9207 np->dqueue_ba = vtobus(np->dqueue);
9210 * Allocate the target bus address array.
9212 np->targtbl = (u32 *) sym_calloc_dma(256, "TARGTBL");
9213 if (!np->targtbl)
9214 goto attach_failed;
9215 np->targtbl_ba = vtobus(np->targtbl);
9218 * Allocate SCRIPTS areas.
9220 np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0");
9221 np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0");
9222 if (!np->scripta0 || !np->scriptb0)
9223 goto attach_failed;
9226 * Allocate some CCBs. We need at least ONE.
9228 if (!sym_alloc_ccb(np))
9229 goto attach_failed;
9232 * Calculate BUS addresses where we are going
9233 * to load the SCRIPTS.
9235 np->scripta_ba = vtobus(np->scripta0);
9236 np->scriptb_ba = vtobus(np->scriptb0);
9237 np->scriptb0_ba = np->scriptb_ba;
9239 if (np->ram_ba) {
9240 np->scripta_ba = np->ram_ba;
9241 if (np->features & FE_RAM8K) {
9242 np->ram_ws = 8192;
9243 np->scriptb_ba = np->scripta_ba + 4096;
9244 #if BITS_PER_LONG > 32
9245 np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32);
9246 #endif
9248 else
9249 np->ram_ws = 4096;
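/*
 * Resulting layout: with 8K of on-chip RAM, SCRIPTA occupies the first 4K
 * and SCRIPTB the second 4K; with only 4K, SCRIPTA alone runs from RAM and
 * scriptb_ba keeps the host memory (vtobus) address computed above.
 */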
9253 * Copy scripts to controller instance.
9255 bcopy(fw->a_base, np->scripta0, np->scripta_sz);
9256 bcopy(fw->b_base, np->scriptb0, np->scriptb_sz);
9259 * Setup variable parts in scripts and compute
9260 * scripts bus addresses used from the C code.
9262 np->fw_setup(np, fw);
9265 * Bind SCRIPTS with physical addresses usable by the
9266 * SCRIPTS processor (as seen from the BUS = BUS addresses).
9268 sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz);
9269 sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz);
9271 #ifdef SYM_CONF_IARB_SUPPORT
9273 * If user wants IARB to be set when we win arbitration
9274 * and have other jobs, compute the max number of consecutive
9275 * settings of IARB hints before we give devices a chance to
9276 * arbitrate for reselection.
9278 #ifdef SYM_SETUP_IARB_MAX
9279 np->iarb_max = SYM_SETUP_IARB_MAX;
9280 #else
9281 np->iarb_max = 4;
9282 #endif
9283 #endif
9286 * Prepare the idle and invalid task actions.
9288 np->idletask.start = cpu_to_scr(SCRIPTA_BA (np, idle));
9289 np->idletask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));
9290 np->idletask_ba = vtobus(&np->idletask);
9292 np->notask.start = cpu_to_scr(SCRIPTA_BA (np, idle));
9293 np->notask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));
9294 np->notask_ba = vtobus(&np->notask);
9296 np->bad_itl.start = cpu_to_scr(SCRIPTA_BA (np, idle));
9297 np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));
9298 np->bad_itl_ba = vtobus(&np->bad_itl);
9300 np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA (np, idle));
9301 np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA (np,bad_i_t_l_q));
9302 np->bad_itlq_ba = vtobus(&np->bad_itlq);
9305 * Allocate and prepare the lun JUMP table that is used
9306 * for a target prior to the probing of devices (bad lun table).
9307 * A private table will be allocated for the target on the
9308 * first INQUIRY response received.
9310 np->badluntbl = sym_calloc_dma(256, "BADLUNTBL");
9311 if (!np->badluntbl)
9312 goto attach_failed;
9314 np->badlun_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));
9315 for (i = 0 ; i < 64 ; i++) /* 64 luns/target, no less */
9316 np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
9319 * Prepare the bus address array that contains the bus
9320 * address of each target control block.
9321 * For now, assume all logical units are wrong. :)
9323 for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
9324 np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i]));
9325 np->target[i].head.luntbl_sa =
9326 cpu_to_scr(vtobus(np->badluntbl));
9327 np->target[i].head.lun0_sa =
9328 cpu_to_scr(vtobus(&np->badlun_sa));
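/*
 * With this set-up, a reselection from any LUN that has no LCB yet goes
 * through badluntbl/badlun_sa and lands in the SCRIPTS 'resel_bad_lun'
 * handler, until a real LUN table is installed for the target.
 */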
9332 * Now check the cache handling of the pci chipset.
9334 if (sym_snooptest (np)) {
9335 #ifdef FreeBSD_Bus_Io_Abstraction
9336 device_printf(dev, "CACHE INCORRECTLY CONFIGURED.\n");
9337 #else
9338 kprintf("%s: CACHE INCORRECTLY CONFIGURED.\n", sym_name(np));
9339 #endif
9340 goto attach_failed;
9344 * Now deal with CAM.
9345 * Hopefully, we will succeed with that one.:)
9347 if (!sym_cam_attach(np))
9348 goto attach_failed;
9351 * Sigh! we are done.
9353 return 0;
9356 * We have failed.
9357 * We will try to free all the resources we have
9358 * allocated, but if we are a boot device, this
9359 * will not help that much.;)
9361 attach_failed:
9362 if (np)
9363 sym_pci_free(np);
9364 return ENXIO;
9368 * Free everything that has been allocated for this device.
9370 static void sym_pci_free(hcb_p np)
9372 SYM_QUEHEAD *qp;
9373 ccb_p cp;
9374 tcb_p tp;
9375 lcb_p lp;
9376 int target, lun;
9379 * First free CAM resources.
9381 crit_enter();
9382 sym_cam_free(np);
9383 crit_exit();
9386 * Now everything should be quiet enough for us to
9387 * free the other resources.
9389 #ifdef FreeBSD_Bus_Io_Abstraction
9390 if (np->ram_res)
9391 bus_release_resource(np->device, SYS_RES_MEMORY,
9392 np->ram_id, np->ram_res);
9393 if (np->mmio_res)
9394 bus_release_resource(np->device, SYS_RES_MEMORY,
9395 SYM_PCI_MMIO, np->mmio_res);
9396 if (np->io_res)
9397 bus_release_resource(np->device, SYS_RES_IOPORT,
9398 SYM_PCI_IO, np->io_res);
9399 if (np->irq_res)
9400 bus_release_resource(np->device, SYS_RES_IRQ,
9401 0, np->irq_res);
9402 #else
9404 * YEAH!!!
9405 * It seems there is no means to free MMIO resources.
9407 #endif
9409 if (np->scriptb0)
9410 sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0");
9411 if (np->scripta0)
9412 sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0");
9413 if (np->squeue)
9414 sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE");
9415 if (np->dqueue)
9416 sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE");
9418 while ((qp = sym_remque_head(&np->free_ccbq)) != 0) {
9419 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
9420 #ifdef FreeBSD_Bus_Dma_Abstraction
9421 bus_dmamap_destroy(np->data_dmat, cp->dmamap);
9422 #endif
9423 sym_mfree_dma(cp->sns_bbuf, SYM_SNS_BBUF_LEN, "SNS_BBUF");
9424 sym_mfree_dma(cp, sizeof(*cp), "CCB");
9427 if (np->badluntbl)
9428 sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL");
9430 for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
9431 tp = &np->target[target];
9432 for (lun = 0 ; lun < SYM_CONF_MAX_LUN ; lun++) {
9433 lp = sym_lp(np, tp, lun);
9434 if (!lp)
9435 continue;
9436 if (lp->itlq_tbl)
9437 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4,
9438 "ITLQ_TBL");
9439 if (lp->cb_tags)
9440 sym_mfree(lp->cb_tags, SYM_CONF_MAX_TASK,
9441 "CB_TAGS");
9442 sym_mfree_dma(lp, sizeof(*lp), "LCB");
9444 #if SYM_CONF_MAX_LUN > 1
9445 if (tp->lunmp)
9446 sym_mfree(tp->lunmp, SYM_CONF_MAX_LUN*sizeof(lcb_p),
9447 "LUNMP");
9448 #endif
9450 if (np->targtbl)
9451 sym_mfree_dma(np->targtbl, 256, "TARGTBL");
9452 #ifdef FreeBSD_Bus_Dma_Abstraction
9453 if (np->data_dmat)
9454 bus_dma_tag_destroy(np->data_dmat);
9455 #endif
9456 sym_mfree_dma(np, sizeof(*np), "HCB");
9460 * Allocate CAM resources and register a bus to CAM.
9462 int sym_cam_attach(hcb_p np)
9464 struct cam_devq *devq = 0;
9465 struct cam_sim *sim = 0;
9466 struct cam_path *path = 0;
9467 struct ccb_setasync csa;
9468 int err;
9470 crit_enter();
9473 * Establish our interrupt handler.
9475 #ifdef FreeBSD_Bus_Io_Abstraction
9476 err = bus_setup_intr(np->device, np->irq_res, 0,
9477 sym_intr, np, &np->intr, NULL);
9478 if (err) {
9479 device_printf(np->device, "bus_setup_intr() failed: %d\n",
9480 err);
9481 goto fail;
9483 #else
9484 err = 0;
9485 if (!pci_map_int (np->pci_tag, sym_intr, np)) {
9486 kprintf("%s: failed to map interrupt\n", sym_name(np));
9487 goto fail;
9489 #endif
9492 * Create the device queue for our sym SIM.
9494 devq = cam_simq_alloc(SYM_CONF_MAX_START);
9495 if (devq == NULL) {
9496 goto fail;
9500 * Construct our SIM entry.
9502 sim = cam_sim_alloc(sym_action, sym_poll, "sym", np, np->unit,
9503 &sim_mplock, 1, SYM_SETUP_MAX_TAG, devq);
9504 cam_simq_release(devq);
9505 if (sim == NULL)
9506 goto fail;
9508 if (xpt_bus_register(sim, 0) != CAM_SUCCESS)
9509 goto fail;
9510 np->sim = sim;
9511 sim = 0;
9513 if (xpt_create_path(&path, 0,
9514 cam_sim_path(np->sim), CAM_TARGET_WILDCARD,
9515 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
9516 goto fail;
9518 np->path = path;
9521 * Establish our async notification handler.
9523 xpt_setup_ccb(&csa.ccb_h, np->path, 5);
9524 csa.ccb_h.func_code = XPT_SASYNC_CB;
9525 csa.event_enable = AC_LOST_DEVICE;
9526 csa.callback = sym_async;
9527 csa.callback_arg = np->sim;
9528 xpt_action((union ccb *)&csa);
9531 * Start the chip now, without resetting the BUS, since
9532 * it seems that this must stay under control of CAM.
9533 * With LVD/SE capable chips and BUS in SE mode, we may
9534 * get a spurious SMBC interrupt.
9536 sym_init (np, 0);
9538 crit_exit();
9539 return 1;
9540 fail:
9541 if (sim)
9542 cam_sim_free(sim);
9544 sym_cam_free(np);
9546 crit_exit();
9547 return 0;
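/*
 * Note the return convention of sym_cam_attach(): 1 means success and 0
 * means failure, which is what the !sym_cam_attach(np) test in
 * sym_pci_attach() relies on.
 */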
9551 * Free everything that deals with CAM.
9553 void sym_cam_free(hcb_p np)
9555 #ifdef FreeBSD_Bus_Io_Abstraction
9556 if (np->intr)
9557 bus_teardown_intr(np->device, np->irq_res, np->intr);
9558 #else
9559 /* pci_unmap_int(np->pci_tag); */ /* Does nothing */
9560 #endif
9562 if (np->sim) {
9563 xpt_bus_deregister(cam_sim_path(np->sim));
9564 cam_sim_free(np->sim);
9566 if (np->path)
9567 xpt_free_path(np->path);
9570 /*============ OPTIONAL NVRAM SUPPORT =================*/
9573 * Get host setup from NVRAM.
9575 static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram)
9577 #ifdef SYM_CONF_NVRAM_SUPPORT
9579 * Get parity checking, host ID, verbose mode
9580 * and miscellaneous host flags from NVRAM.
9582 switch(nvram->type) {
9583 case SYM_SYMBIOS_NVRAM:
9584 if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE))
9585 np->rv_scntl0 &= ~0x0a;
9586 np->myaddr = nvram->data.Symbios.host_id & 0x0f;
9587 if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS)
9588 np->verbose += 1;
9589 if (nvram->data.Symbios.flags1 & SYMBIOS_SCAN_HI_LO)
9590 np->usrflags |= SYM_SCAN_TARGETS_HILO;
9591 if (nvram->data.Symbios.flags2 & SYMBIOS_AVOID_BUS_RESET)
9592 np->usrflags |= SYM_AVOID_BUS_RESET;
9593 break;
9594 case SYM_TEKRAM_NVRAM:
9595 np->myaddr = nvram->data.Tekram.host_id & 0x0f;
9596 break;
9597 default:
9598 break;
9600 #endif
9604 * Get target setup from NVRAM.
9606 #ifdef SYM_CONF_NVRAM_SUPPORT
9607 static void sym_Symbios_setup_target(hcb_p np,int target, Symbios_nvram *nvram);
9608 static void sym_Tekram_setup_target(hcb_p np,int target, Tekram_nvram *nvram);
9609 #endif
9611 static void
9612 sym_nvram_setup_target (hcb_p np, int target, struct sym_nvram *nvp)
9614 #ifdef SYM_CONF_NVRAM_SUPPORT
9615 switch(nvp->type) {
9616 case SYM_SYMBIOS_NVRAM:
9617 sym_Symbios_setup_target (np, target, &nvp->data.Symbios);
9618 break;
9619 case SYM_TEKRAM_NVRAM:
9620 sym_Tekram_setup_target (np, target, &nvp->data.Tekram);
9621 break;
9622 default:
9623 break;
9625 #endif
9628 #ifdef SYM_CONF_NVRAM_SUPPORT
9630 * Get target set-up from Symbios format NVRAM.
9632 static void
9633 sym_Symbios_setup_target(hcb_p np, int target, Symbios_nvram *nvram)
9635 tcb_p tp = &np->target[target];
9636 Symbios_target *tn = &nvram->target[target];
9638 tp->tinfo.user.period = tn->sync_period ? (tn->sync_period + 3) / 4 : 0;
9639 tp->tinfo.user.width = tn->bus_width == 0x10 ? BUS_16_BIT : BUS_8_BIT;
9640 tp->usrtags =
9641 (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? SYM_SETUP_MAX_TAG : 0;
9643 if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
9644 tp->usrflags &= ~SYM_DISC_ENABLED;
9645 if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
9646 tp->usrflags |= SYM_SCAN_BOOT_DISABLED;
9647 if (!(tn->flags & SYMBIOS_SCAN_LUNS))
9648 tp->usrflags |= SYM_SCAN_LUNS_DISABLED;
9652 * Get target set-up from Tekram format NVRAM.
9654 static void
9655 sym_Tekram_setup_target(hcb_p np, int target, Tekram_nvram *nvram)
9657 tcb_p tp = &np->target[target];
9658 struct Tekram_target *tn = &nvram->target[target];
9659 int i;
9661 if (tn->flags & TEKRAM_SYNC_NEGO) {
9662 i = tn->sync_index & 0xf;
9663 tp->tinfo.user.period = Tekram_sync[i];
9666 tp->tinfo.user.width =
9667 (tn->flags & TEKRAM_WIDE_NEGO) ? BUS_16_BIT : BUS_8_BIT;
9669 if (tn->flags & TEKRAM_TAGGED_COMMANDS) {
9670 tp->usrtags = 2 << nvram->max_tags_index;
9673 if (tn->flags & TEKRAM_DISCONNECT_ENABLE)
9674 tp->usrflags |= SYM_DISC_ENABLED;
9676 /* If any device does not support parity, we will not use this option */
9677 if (!(tn->flags & TEKRAM_PARITY_CHECK))
9678 np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */
9681 #ifdef SYM_CONF_DEBUG_NVRAM
9683 * Dump Symbios format NVRAM for debugging purposes.
9685 static void sym_display_Symbios_nvram(hcb_p np, Symbios_nvram *nvram)
9687 int i;
9689 /* display Symbios nvram host data */
9690 kprintf("%s: HOST ID=%d%s%s%s%s%s%s\n",
9691 sym_name(np), nvram->host_id & 0x0f,
9692 (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
9693 (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"",
9694 (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" :"",
9695 (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" :"",
9696 (nvram->flags2 & SYMBIOS_AVOID_BUS_RESET)?" NO_RESET" :"",
9697 (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :"");
9699 /* display Symbios nvram drive data */
9700 for (i = 0 ; i < 15 ; i++) {
9701 struct Symbios_target *tn = &nvram->target[i];
9702 kprintf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
9703 sym_name(np), i,
9704 (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "",
9705 (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "",
9706 (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "",
9707 (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "",
9708 tn->bus_width,
9709 tn->sync_period / 4,
9710 tn->timeout);
9715 * Dump TEKRAM format NVRAM for debugging purposes.
9717 static u_char Tekram_boot_delay[7] = {3, 5, 10, 20, 30, 60, 120};
9718 static void sym_display_Tekram_nvram(hcb_p np, Tekram_nvram *nvram)
9720 int i, tags, boot_delay;
9721 char *rem;
9723 /* display Tekram nvram host data */
9724 tags = 2 << nvram->max_tags_index;
9725 boot_delay = 0;
9726 if (nvram->boot_delay_index < 6)
9727 boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
9728 switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
9729 default:
9730 case 0: rem = ""; break;
9731 case 1: rem = " REMOVABLE=boot device"; break;
9732 case 2: rem = " REMOVABLE=all"; break;
9735 kprintf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
9736 sym_name(np), nvram->host_id & 0x0f,
9737 (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
9738 (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES" :"",
9739 (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"",
9740 (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"",
9741 (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"",
9742 (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"",
9743 (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"",
9744 (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"",
9745 rem, boot_delay, tags);
9747 /* display Tekram nvram drive data */
9748 for (i = 0; i <= 15; i++) {
9749 int sync, j;
9750 struct Tekram_target *tn = &nvram->target[i];
9751 j = tn->sync_index & 0xf;
9752 sync = Tekram_sync[j];
9753 kprintf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n",
9754 sym_name(np), i,
9755 (tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "",
9756 (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "",
9757 (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "",
9758 (tn->flags & TEKRAM_START_CMD) ? " START" : "",
9759 (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "",
9760 (tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "",
9761 sync);
9764 #endif /* SYM_CONF_DEBUG_NVRAM */
9765 #endif /* SYM_CONF_NVRAM_SUPPORT */
9769 * Try reading Symbios or Tekram NVRAM
9771 #ifdef SYM_CONF_NVRAM_SUPPORT
9772 static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram);
9773 static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram);
9774 #endif
9776 int sym_read_nvram(hcb_p np, struct sym_nvram *nvp)
9778 #ifdef SYM_CONF_NVRAM_SUPPORT
9780 * Try to read SYMBIOS nvram.
9781 * Try to read TEKRAM nvram if Symbios nvram not found.
9783 if (SYM_SETUP_SYMBIOS_NVRAM &&
9784 !sym_read_Symbios_nvram (np, &nvp->data.Symbios)) {
9785 nvp->type = SYM_SYMBIOS_NVRAM;
9786 #ifdef SYM_CONF_DEBUG_NVRAM
9787 sym_display_Symbios_nvram(np, &nvp->data.Symbios);
9788 #endif
9790 else if (SYM_SETUP_TEKRAM_NVRAM &&
9791 !sym_read_Tekram_nvram (np, &nvp->data.Tekram)) {
9792 nvp->type = SYM_TEKRAM_NVRAM;
9793 #ifdef SYM_CONF_DEBUG_NVRAM
9794 sym_display_Tekram_nvram(np, &nvp->data.Tekram);
9795 #endif
9797 else
9798 nvp->type = 0;
9799 #else
9800 nvp->type = 0;
9801 #endif
9802 return nvp->type;
9806 #ifdef SYM_CONF_NVRAM_SUPPORT
9808 * 24C16 EEPROM reading.
9810 * GPIO0 - data in/data out
9811 * GPIO1 - clock
9812 * Symbios NVRAM wiring now also used by Tekram.
9815 #define SET_BIT 0
9816 #define CLR_BIT 1
9817 #define SET_CLK 2
9818 #define CLR_CLK 3
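/*
 * The 24C16 is driven with an I2C-style protocol bit-banged through the
 * GPREG register: GPIO0 carries the data line and GPIO1 the clock, and the
 * helpers below generate the START/STOP conditions and per-byte ACKs.
 */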
9821 * Set/clear the data (GPIO0) or clock (GPIO1) bit in GPREG
9823 static void S24C16_set_bit(hcb_p np, u_char write_bit, u_char *gpreg,
9824 int bit_mode)
9826 UDELAY (5);
9827 switch (bit_mode){
9828 case SET_BIT:
9829 *gpreg |= write_bit;
9830 break;
9831 case CLR_BIT:
9832 *gpreg &= 0xfe;
9833 break;
9834 case SET_CLK:
9835 *gpreg |= 0x02;
9836 break;
9837 case CLR_CLK:
9838 *gpreg &= 0xfd;
9839 break;
9842 OUTB (nc_gpreg, *gpreg);
9843 UDELAY (5);
9847 * Send START condition to NVRAM to wake it up.
9849 static void S24C16_start(hcb_p np, u_char *gpreg)
9851 S24C16_set_bit(np, 1, gpreg, SET_BIT);
9852 S24C16_set_bit(np, 0, gpreg, SET_CLK);
9853 S24C16_set_bit(np, 0, gpreg, CLR_BIT);
9854 S24C16_set_bit(np, 0, gpreg, CLR_CLK);
9858 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
9860 static void S24C16_stop(hcb_p np, u_char *gpreg)
9862 S24C16_set_bit(np, 0, gpreg, SET_CLK);
9863 S24C16_set_bit(np, 1, gpreg, SET_BIT);
9867 * Read or write a bit to/from the NVRAM:
9868 * read when GPIO0 is an input, write when GPIO0 is an output
9870 static void S24C16_do_bit(hcb_p np, u_char *read_bit, u_char write_bit,
9871 u_char *gpreg)
9873 S24C16_set_bit(np, write_bit, gpreg, SET_BIT);
9874 S24C16_set_bit(np, 0, gpreg, SET_CLK);
9875 if (read_bit)
9876 *read_bit = INB (nc_gpreg);
9877 S24C16_set_bit(np, 0, gpreg, CLR_CLK);
9878 S24C16_set_bit(np, 0, gpreg, CLR_BIT);
9882 * Output an ACK to the NVRAM after reading,
9883 * change GPIO0 to output and when done back to an input
9885 static void S24C16_write_ack(hcb_p np, u_char write_bit, u_char *gpreg,
9886 u_char *gpcntl)
9888 OUTB (nc_gpcntl, *gpcntl & 0xfe);
9889 S24C16_do_bit(np, 0, write_bit, gpreg);
9890 OUTB (nc_gpcntl, *gpcntl);
9894 * Input an ACK from NVRAM after writing,
9895 * change GPIO0 to input and when done back to an output
9897 static void S24C16_read_ack(hcb_p np, u_char *read_bit, u_char *gpreg,
9898 u_char *gpcntl)
9900 OUTB (nc_gpcntl, *gpcntl | 0x01);
9901 S24C16_do_bit(np, read_bit, 1, gpreg);
9902 OUTB (nc_gpcntl, *gpcntl);
9906 * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
9907 * GPIO0 must already be set as an output
9909 static void S24C16_write_byte(hcb_p np, u_char *ack_data, u_char write_data,
9910 u_char *gpreg, u_char *gpcntl)
9912 int x;
9914 for (x = 0; x < 8; x++)
9915 S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);
9917 S24C16_read_ack(np, ack_data, gpreg, gpcntl);
9921 * READ a byte from the NVRAM and then send an ACK to say we have got it,
9922 * GPIO0 must already be set as an input
9924 static void S24C16_read_byte(hcb_p np, u_char *read_data, u_char ack_data,
9925 u_char *gpreg, u_char *gpcntl)
9927 int x;
9928 u_char read_bit;
9930 *read_data = 0;
9931 for (x = 0; x < 8; x++) {
9932 S24C16_do_bit(np, &read_bit, 1, gpreg);
9933 *read_data |= ((read_bit & 0x01) << (7 - x));
9936 S24C16_write_ack(np, ack_data, gpreg, gpcntl);
9940 * Read 'len' bytes starting at 'offset'.
9942 static int sym_read_S24C16_nvram (hcb_p np, int offset, u_char *data, int len)
9944 u_char gpcntl, gpreg;
9945 u_char old_gpcntl, old_gpreg;
9946 u_char ack_data;
9947 int retv = 1;
9948 int x;
9950 /* save current state of GPCNTL and GPREG */
9951 old_gpreg = INB (nc_gpreg);
9952 old_gpcntl = INB (nc_gpcntl);
9953 gpcntl = old_gpcntl & 0x1c;
9955 /* set up GPREG & GPCNTL to put GPIO0 and GPIO1 into a known state */
9956 OUTB (nc_gpreg, old_gpreg);
9957 OUTB (nc_gpcntl, gpcntl);
9959 /* this is to set NVRAM into a known state with GPIO0/1 both low */
9960 gpreg = old_gpreg;
9961 S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
9962 S24C16_set_bit(np, 0, &gpreg, CLR_BIT);
9964 /* now set NVRAM inactive with GPIO0/1 both high */
9965 S24C16_stop(np, &gpreg);
9967 /* activate NVRAM */
9968 S24C16_start(np, &gpreg);
9970 /* write device code and random address MSB */
9971 S24C16_write_byte(np, &ack_data,
9972 0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
9973 if (ack_data & 0x01)
9974 goto out;
9976 /* write random address LSB */
9977 S24C16_write_byte(np, &ack_data,
9978 offset & 0xff, &gpreg, &gpcntl);
9979 if (ack_data & 0x01)
9980 goto out;
9982 /* regenerate START state to set up for reading */
9983 S24C16_start(np, &gpreg);
9985 /* rewrite device code and address MSB with read bit set (lsb = 0x01) */
9986 S24C16_write_byte(np, &ack_data,
9987 0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
9988 if (ack_data & 0x01)
9989 goto out;
9991 /* now set up GPIO0 for inputting data */
9992 gpcntl |= 0x01;
9993 OUTB (nc_gpcntl, gpcntl);
9995 /* input all requested data - only part of total NVRAM */
9996 for (x = 0; x < len; x++)
9997 S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl);
9999 /* finally put NVRAM back in inactive mode */
10000 gpcntl &= 0xfe;
10001 OUTB (nc_gpcntl, gpcntl);
10002 S24C16_stop(np, &gpreg);
10003 retv = 0;
10004 out:
10005 /* return GPIO0/1 to original states after having accessed NVRAM */
10006 OUTB (nc_gpcntl, old_gpcntl);
10007 OUTB (nc_gpreg, old_gpreg);
10009 return retv;
10012 #undef SET_BIT
10013 #undef CLR_BIT
10014 #undef SET_CLK
10015 #undef CLR_CLK
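/*
 * Addressing note: the 24C16 exposes its 2KB as eight 256-byte pages, so
 * the three high bits of the 11-bit offset are folded into the device
 * select byte (0xa0 | ((offset >> 7) & 0x0e)) while the low 8 bits are
 * sent as the word address.
 */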
10018 * Try reading Symbios NVRAM.
10019 * Return 0 if OK.
10021 static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram)
10023 static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
10024 u_char *data = (u_char *) nvram;
10025 int len = sizeof(*nvram);
10026 u_short csum;
10027 int x;
10029 /* probe the 24c16 and read the SYMBIOS 24c16 area */
10030 if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
10031 return 1;
10033 /* check valid NVRAM signature, verify byte count and checksum */
10034 if (nvram->type != 0 ||
10035 bcmp(nvram->trailer, Symbios_trailer, 6) ||
10036 nvram->byte_count != len - 12)
10037 return 1;
10039 /* verify checksum */
10040 for (x = 6, csum = 0; x < len - 6; x++)
10041 csum += data[x];
10042 if (csum != nvram->checksum)
10043 return 1;
10045 return 0;
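/*
 * The checksum verified above is the 16-bit sum of every byte between the
 * 6-byte header and the 6-byte trailer of the Symbios NVRAM image.
 */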
10049 * 93C46 EEPROM reading.
10051 * GPIO0 - data in
10052 * GPIO1 - data out
10053 * GPIO2 - clock
10054 * GPIO4 - chip select
10056 * Used by Tekram.
10060 * Pulse the clock bit (GPIO2)
10062 static void T93C46_Clk(hcb_p np, u_char *gpreg)
10064 OUTB (nc_gpreg, *gpreg | 0x04);
10065 UDELAY (2);
10066 OUTB (nc_gpreg, *gpreg);
10070 * Read bit from NVRAM
10072 static void T93C46_Read_Bit(hcb_p np, u_char *read_bit, u_char *gpreg)
10074 UDELAY (2);
10075 T93C46_Clk(np, gpreg);
10076 *read_bit = INB (nc_gpreg);
10080 * Write bit to GPIO1 (data out)
10082 static void T93C46_Write_Bit(hcb_p np, u_char write_bit, u_char *gpreg)
10084 if (write_bit & 0x01)
10085 *gpreg |= 0x02;
10086 else
10087 *gpreg &= 0xfd;
10089 *gpreg |= 0x10;
10091 OUTB (nc_gpreg, *gpreg);
10092 UDELAY (2);
10094 T93C46_Clk(np, gpreg);
10098 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
10100 static void T93C46_Stop(hcb_p np, u_char *gpreg)
10102 *gpreg &= 0xef;
10103 OUTB (nc_gpreg, *gpreg);
10104 UDELAY (2);
10106 T93C46_Clk(np, gpreg);
10110 * Send read command and address to NVRAM
10112 static void T93C46_Send_Command(hcb_p np, u_short write_data,
10113 u_char *read_bit, u_char *gpreg)
10115 int x;
10117 /* send 9 bits, start bit (1), command (2), address (6) */
10118 for (x = 0; x < 9; x++)
10119 T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);
10121 *read_bit = INB (nc_gpreg);
10125 * READ a 16-bit word from the NVRAM
10127 static void T93C46_Read_Word(hcb_p np, u_short *nvram_data, u_char *gpreg)
10129 int x;
10130 u_char read_bit;
10132 *nvram_data = 0;
10133 for (x = 0; x < 16; x++) {
10134 T93C46_Read_Bit(np, &read_bit, gpreg);
10136 if (read_bit & 0x01)
10137 *nvram_data |= (0x01 << (15 - x));
10138 else
10139 *nvram_data &= ~(0x01 << (15 - x));
10144 * Read Tekram NVRAM data.
10146 static int T93C46_Read_Data(hcb_p np, u_short *data,int len,u_char *gpreg)
10148 u_char read_bit;
10149 int x;
10151 for (x = 0; x < len; x++) {
10153 /* output read command and address */
10154 T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg);
10155 if (read_bit & 0x01)
10156 return 1; /* Bad */
10157 T93C46_Read_Word(np, &data[x], gpreg);
10158 T93C46_Stop(np, gpreg);
10161 return 0;
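/*
 * Each 16-bit word is fetched with its own READ command: a start bit, the
 * 2-bit read opcode and a 6-bit word address (0x180 | x above), clocked
 * out MSB first, followed by 16 data bits clocked back in.
 */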
10165 * Try reading 93C46 Tekram NVRAM.
10167 static int sym_read_T93C46_nvram (hcb_p np, Tekram_nvram *nvram)
10169 u_char gpcntl, gpreg;
10170 u_char old_gpcntl, old_gpreg;
10171 int retv = 1;
10173 /* save current state of GPCNTL and GPREG */
10174 old_gpreg = INB (nc_gpreg);
10175 old_gpcntl = INB (nc_gpcntl);
10177 /* set up GPREG & GPCNTL to put GPIO0/1/2/4 into a known state: 0 in,
10178 1/2/4 out */
10179 gpreg = old_gpreg & 0xe9;
10180 OUTB (nc_gpreg, gpreg);
10181 gpcntl = (old_gpcntl & 0xe9) | 0x09;
10182 OUTB (nc_gpcntl, gpcntl);
10184 /* input all of NVRAM, 64 words */
10185 retv = T93C46_Read_Data(np, (u_short *) nvram,
10186 sizeof(*nvram) / sizeof(short), &gpreg);
10188 /* return GPIO0/1/2/4 to original states after having accessed NVRAM */
10189 OUTB (nc_gpcntl, old_gpcntl);
10190 OUTB (nc_gpreg, old_gpreg);
10192 return retv;
10196 * Try reading Tekram NVRAM.
10197 * Return 0 if OK.
10199 static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram)
10201 u_char *data = (u_char *) nvram;
10202 int len = sizeof(*nvram);
10203 u_short csum;
10204 int x;
10206 switch (np->device_id) {
10207 case PCI_ID_SYM53C885:
10208 case PCI_ID_SYM53C895:
10209 case PCI_ID_SYM53C896:
10210 x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
10211 data, len);
10212 break;
10213 case PCI_ID_SYM53C875:
10214 x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
10215 data, len);
10216 if (!x)
10217 break;
10218 default:
10219 x = sym_read_T93C46_nvram(np, nvram);
10220 break;
10222 if (x)
10223 return 1;
10225 /* verify checksum */
10226 for (x = 0, csum = 0; x < len - 1; x += 2)
10227 csum += data[x] + (data[x+1] << 8);
10228 if (csum != 0x1234)
10229 return 1;
10231 return 0;
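/*
 * Tekram images are validated by summing the NVRAM as little-endian 16-bit
 * words; a valid image sums to the constant 0x1234.
 */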
10234 #endif /* SYM_CONF_NVRAM_SUPPORT */