MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / drivers / scsi / sym53c8xx_comm.h
1 /******************************************************************************
2 ** High Performance device driver for the Symbios 53C896 controller.
3 **
4 ** Copyright (C) 1998-2001 Gerard Roudier <groudier@free.fr>
5 **
6 ** This driver also supports all the Symbios 53C8XX controller family,
7 ** except 53C810 revisions < 16, 53C825 revisions < 16 and all
8 ** revisions of 53C815 controllers.
9 **
10 ** This driver is based on the Linux port of the FreeBSD ncr driver.
11 **
12 ** Copyright (C) 1994 Wolfgang Stanglmeier
13 **
14 **-----------------------------------------------------------------------------
15 **
16 ** This program is free software; you can redistribute it and/or modify
17 ** it under the terms of the GNU General Public License as published by
18 ** the Free Software Foundation; either version 2 of the License, or
19 ** (at your option) any later version.
21 ** This program is distributed in the hope that it will be useful,
22 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
23 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 ** GNU General Public License for more details.
26 ** You should have received a copy of the GNU General Public License
27 ** along with this program; if not, write to the Free Software
28 ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 **-----------------------------------------------------------------------------
32 ** The Linux port of the FreeBSD ncr driver was done in
33 ** November 1995 by:
35 ** Gerard Roudier <groudier@free.fr>
37 ** Since this driver originates from the FreeBSD version, and in order
38 ** to keep synergy between both, any suggested enhancement or correction
39 ** received for Linux is automatically a potential candidate for the
40 ** FreeBSD version.
42 ** The original driver has been written for 386bsd and FreeBSD by
43 ** Wolfgang Stanglmeier <wolf@cologne.de>
44 ** Stefan Esser <se@mi.Uni-Koeln.de>
46 **-----------------------------------------------------------------------------
48 ** Major contributions:
49 ** --------------------
51 ** NVRAM detection and reading.
52 ** Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
54 *******************************************************************************
58 ** This file contains definitions and code that the
59 ** sym53c8xx and ncr53c8xx drivers should share.
60 ** The sharing will be achieved in a further version
61 ** of the driver bundle. For now, only the ncr53c8xx
62 ** driver includes this file.
65 /*==========================================================
67 ** Hmmm... How complex some PCI-HOST bridges actually
68 ** are, despite the fact that the PCI specifications
69 ** look so smart and simple! ;-)
71 **==========================================================
74 /*==========================================================
76 ** Miscellaneous defines.
78 **==========================================================
81 #define u_char unsigned char
82 #define u_long unsigned long
84 #ifndef bzero
85 #define bzero(d, n) memset((d), 0, (n))
86 #endif
88 /*==========================================================
90 ** assert ()
92 **==========================================================
94 ** modified copy from 386bsd:/usr/include/sys/assert.h
96 **----------------------------------------------------------
99 #define assert(expression) { \
100 if (!(expression)) { \
101 (void)panic( \
102 "assertion \"%s\" failed: file \"%s\", line %d\n", \
103 #expression, \
104 __FILE__, __LINE__); \
105 } \
106 }
108 /*==========================================================
110 ** Debugging tags
112 **==========================================================
115 #define DEBUG_ALLOC (0x0001)
116 #define DEBUG_PHASE (0x0002)
117 #define DEBUG_QUEUE (0x0008)
118 #define DEBUG_RESULT (0x0010)
119 #define DEBUG_POINTER (0x0020)
120 #define DEBUG_SCRIPT (0x0040)
121 #define DEBUG_TINY (0x0080)
122 #define DEBUG_TIMING (0x0100)
123 #define DEBUG_NEGO (0x0200)
124 #define DEBUG_TAGS (0x0400)
125 #define DEBUG_SCATTER (0x0800)
126 #define DEBUG_IC (0x1000)
129 ** Enable/Disable debug messages.
130 ** Can be changed at runtime too.
133 #ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
134 static int ncr_debug = SCSI_NCR_DEBUG_FLAGS;
135 #define DEBUG_FLAGS ncr_debug
136 #else
137 #define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS
138 #endif
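/*
**	Illustrative sketch (disabled, not from the original code): how the
**	flags above are typically tested before emitting a debug message.
**	NAME53C8XX is the driver name string supplied by the including driver.
*/
#if 0
static void example_debug_trace(int queued)
{
	if (DEBUG_FLAGS & DEBUG_QUEUE)
		printk(NAME53C8XX ": %d command(s) in the start queue\n",
			queued);
}
#endif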
140 /*==========================================================
142 ** A la VMS/CAM-3 queue management.
143 ** Implemented from linux list management.
145 **==========================================================
148 typedef struct xpt_quehead {
149 struct xpt_quehead *flink; /* Forward pointer */
150 struct xpt_quehead *blink; /* Backward pointer */
151 } XPT_QUEHEAD;
153 #define xpt_que_init(ptr) do { \
154 (ptr)->flink = (ptr); (ptr)->blink = (ptr); \
155 } while (0)
157 static inline void __xpt_que_add(struct xpt_quehead * new,
158 struct xpt_quehead * blink,
159 struct xpt_quehead * flink)
161 flink->blink = new;
162 new->flink = flink;
163 new->blink = blink;
164 blink->flink = new;
167 static inline void __xpt_que_del(struct xpt_quehead * blink,
168 struct xpt_quehead * flink)
170 flink->blink = blink;
171 blink->flink = flink;
174 static inline int xpt_que_empty(struct xpt_quehead *head)
176 return head->flink == head;
179 static inline void xpt_que_splice(struct xpt_quehead *list,
180 struct xpt_quehead *head)
182 struct xpt_quehead *first = list->flink;
184 if (first != list) {
185 struct xpt_quehead *last = list->blink;
186 struct xpt_quehead *at = head->flink;
188 first->blink = head;
189 head->flink = first;
191 last->flink = at;
192 at->blink = last;
196 #define xpt_que_entry(ptr, type, member) \
197 ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
200 #define xpt_insque(new, pos) __xpt_que_add(new, pos, (pos)->flink)
202 #define xpt_remque(el) __xpt_que_del((el)->blink, (el)->flink)
204 #define xpt_insque_head(new, head) __xpt_que_add(new, head, (head)->flink)
206 static inline struct xpt_quehead *xpt_remque_head(struct xpt_quehead *head)
208 struct xpt_quehead *elem = head->flink;
210 if (elem != head)
211 __xpt_que_del(head, elem->flink);
212 else
213 elem = NULL;
214 return elem;
217 #define xpt_insque_tail(new, head) __xpt_que_add(new, (head)->blink, head)
219 static inline struct xpt_quehead *xpt_remque_tail(struct xpt_quehead *head)
220 {
221 struct xpt_quehead *elem = head->blink;
223 if (elem != head)
224 __xpt_que_del(elem->blink, head);
225 else
226 elem = NULL;
227 return elem;
228 }
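/*
**	Illustrative sketch (disabled, not from the original code): typical
**	use of the queue helpers above. The 'example_elem' type is
**	hypothetical; real CCB structures embed an XPT_QUEHEAD the same way.
*/
#if 0
struct example_elem {
	XPT_QUEHEAD link;
	int value;
};

static void example_queue_usage(struct example_elem *e)
{
	XPT_QUEHEAD head;
	XPT_QUEHEAD *qp;

	xpt_que_init(&head);
	xpt_insque_tail(&e->link, &head);	/* FIFO insert at the tail  */
	qp = xpt_remque_head(&head);		/* FIFO removal at the head */
	if (qp) {
		struct example_elem *back =
			xpt_que_entry(qp, struct example_elem, link);
		/* back == e: the container was recovered from its link */
		(void) back;
	}
}
#endif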
231 /*==========================================================
233 ** SMP threading.
235 ** Assuming that SMP systems are generally high end
236 ** systems and may use several SCSI adapters, we are
237 ** using one lock per controller instead of some global
238 ** one. For the moment (linux-2.1.95), driver's entry
239 ** points are called with the 'io_request_lock' lock
240 ** held, so:
241 ** - We are uselessly losing a couple of micro-seconds
242 ** to lock the controller data structure.
243 ** - But the driver is not broken by design for SMP and
244 ** so can be more resistant to bugs or bad changes in
245 ** the IO sub-system code.
246 ** - A small advantage is that the interrupt handling can be
247 ** made as fine-grained as wished (e.g. per controller).
249 **==========================================================
252 spinlock_t DRIVER_SMP_LOCK = SPIN_LOCK_UNLOCKED;
253 #define NCR_LOCK_DRIVER(flags) spin_lock_irqsave(&DRIVER_SMP_LOCK, flags)
254 #define NCR_UNLOCK_DRIVER(flags) \
255 spin_unlock_irqrestore(&DRIVER_SMP_LOCK, flags)
257 #define NCR_INIT_LOCK_NCB(np) spin_lock_init(&np->smp_lock)
258 #define NCR_LOCK_NCB(np, flags) spin_lock_irqsave(&np->smp_lock, flags)
259 #define NCR_UNLOCK_NCB(np, flags) spin_unlock_irqrestore(&np->smp_lock, flags)
261 #define NCR_LOCK_SCSI_DONE(host, flags) \
262 spin_lock_irqsave((host)->host_lock, flags)
263 #define NCR_UNLOCK_SCSI_DONE(host, flags) \
264 spin_unlock_irqrestore(((host)->host_lock), flags)
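/*
**	Illustrative sketch (disabled, not from the original code): the NCB
**	lock macros above only require the controller structure to embed a
**	spinlock_t named 'smp_lock'; 'example_ncb' is hypothetical.
*/
#if 0
struct example_ncb {
	spinlock_t smp_lock;
};

static void example_locking(struct example_ncb *np)
{
	u_long flags;

	NCR_INIT_LOCK_NCB(np);
	NCR_LOCK_NCB(np, flags);
	/* ... touch per-controller data structures ... */
	NCR_UNLOCK_NCB(np, flags);
}
#endif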
266 /*==========================================================
268 ** Memory mapped IO
270 ** Since linux-2.1, we must use ioremap() to map the io
271 ** memory space and iounmap() to unmap it. This allows
272 ** portability. Linux 1.3.X and 2.0.X allowed remapping of
273 ** physical page addresses greater than the highest
274 ** physical memory address to kernel virtual pages with
275 ** vremap() / vfree(). That was not portable but worked
276 ** on the i386 architecture.
278 **==========================================================
281 #ifdef __sparc__
282 #include <asm/irq.h>
283 #endif
285 #define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
287 /*==========================================================
289 ** Insert a delay in micro-seconds and milli-seconds.
291 ** Under Linux, udelay() is restricted to delay <
292 ** 1 milli-second. In fact, it generally works for up
293 ** to 1 second delay. Since 2.1.105, the mdelay() function
294 ** is provided for delays in milli-seconds.
295 ** Under 2.0 kernels, udelay() is an inline function
296 ** that is very inaccurate on Pentium processors.
298 **==========================================================
301 #define UDELAY udelay
302 #define MDELAY mdelay
304 /*==========================================================
306 ** Simple power of two buddy-like allocator.
308 ** This simple code is not intended to be fast, but to
309 ** provide power of 2 aligned memory allocations.
310 ** Since the SCRIPTS processor only supplies 8 bit
311 ** arithmetic, this allocator allows simple and fast
312 ** address calculations from the SCRIPTS code.
313 ** In addition, cache line alignment is guaranteed for
314 ** power of 2 cache line size.
315 ** Enhanced in linux-2.3.44 to provide a memory pool
316 ** per pcidev to support dynamic dma mapping. (I would
317 ** have preferred a real bus abstraction, btw).
319 **==========================================================
322 #define __GetFreePages(flags, order) __get_free_pages(flags, order)
324 #define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */
325 #if PAGE_SIZE >= 8192
326 #define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */
327 #else
328 #define MEMO_PAGE_ORDER 1 /* 2 PAGES maximum */
329 #endif
330 #define MEMO_FREE_UNUSED /* Free unused pages immediately */
331 #define MEMO_WARN 1
332 #define MEMO_GFP_FLAGS GFP_ATOMIC
333 #define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER)
334 #define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT)
335 #define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1)
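/*
**	Worked example (not from the original code): with a 4 KB PAGE_SIZE,
**	MEMO_PAGE_ORDER is 1, so a cluster spans 2 pages, MEMO_CLUSTER_SIZE
**	is 8192 and MEMO_CLUSTER_MASK is 0x1fff.
*/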
337 typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */
338 typedef struct device *m_bush_t; /* Something that addresses DMAable */
340 typedef struct m_link { /* Link between free memory chunks */
341 struct m_link *next;
342 } m_link_s;
344 typedef struct m_vtob { /* Virtual to Bus address translation */
345 struct m_vtob *next;
346 m_addr_t vaddr;
347 m_addr_t baddr;
348 } m_vtob_s;
349 #define VTOB_HASH_SHIFT 5
350 #define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT)
351 #define VTOB_HASH_MASK (VTOB_HASH_SIZE-1)
352 #define VTOB_HASH_CODE(m) \
353 ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)
355 typedef struct m_pool { /* Memory pool of a given kind */
356 m_bush_t bush;
357 m_addr_t (*getp)(struct m_pool *);
358 void (*freep)(struct m_pool *, m_addr_t);
359 #define M_GETP() mp->getp(mp)
360 #define M_FREEP(p) mp->freep(mp, p)
361 #define GetPages() __GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
362 #define FreePages(p) free_pages(p, MEMO_PAGE_ORDER)
363 int nump;
364 m_vtob_s *(vtob[VTOB_HASH_SIZE]);
365 struct m_pool *next;
366 struct m_link h[PAGE_SHIFT-MEMO_SHIFT+MEMO_PAGE_ORDER+1];
367 } m_pool_s;
369 static void *___m_alloc(m_pool_s *mp, int size)
371 int i = 0;
372 int s = (1 << MEMO_SHIFT);
373 int j;
374 m_addr_t a;
375 m_link_s *h = mp->h;
377 if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
378 return NULL;
380 while (size > s) {
381 s <<= 1;
382 ++i;
385 j = i;
386 while (!h[j].next) {
387 if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
388 h[j].next = (m_link_s *) M_GETP();
389 if (h[j].next)
390 h[j].next->next = NULL;
391 break;
393 ++j;
394 s <<= 1;
396 a = (m_addr_t) h[j].next;
397 if (a) {
398 h[j].next = h[j].next->next;
399 while (j > i) {
400 j -= 1;
401 s >>= 1;
402 h[j].next = (m_link_s *) (a+s);
403 h[j].next->next = NULL;
406 #ifdef DEBUG
407 printk("___m_alloc(%d) = %p\n", size, (void *) a);
408 #endif
409 return (void *) a;
412 static void ___m_free(m_pool_s *mp, void *ptr, int size)
414 int i = 0;
415 int s = (1 << MEMO_SHIFT);
416 m_link_s *q;
417 m_addr_t a, b;
418 m_link_s *h = mp->h;
420 #ifdef DEBUG
421 printk("___m_free(%p, %d)\n", ptr, size);
422 #endif
424 if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
425 return;
427 while (size > s) {
428 s <<= 1;
429 ++i;
432 a = (m_addr_t) ptr;
434 while (1) {
435 #ifdef MEMO_FREE_UNUSED
436 if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
437 M_FREEP(a);
438 break;
440 #endif
441 b = a ^ s;
442 q = &h[i];
443 while (q->next && q->next != (m_link_s *) b) {
444 q = q->next;
446 if (!q->next) {
447 ((m_link_s *) a)->next = h[i].next;
448 h[i].next = (m_link_s *) a;
449 break;
451 q->next = q->next->next;
452 a = a & b;
453 s <<= 1;
454 ++i;
458 static void *__m_calloc2(m_pool_s *mp, int size, char *name, int uflags)
460 void *p;
462 p = ___m_alloc(mp, size);
464 if (DEBUG_FLAGS & DEBUG_ALLOC)
465 printk ("new %-10s[%4d] @%p.\n", name, size, p);
467 if (p)
468 bzero(p, size);
469 else if (uflags & MEMO_WARN)
470 printk (NAME53C8XX ": failed to allocate %s[%d]\n", name, size);
472 return p;
475 #define __m_calloc(mp, s, n) __m_calloc2(mp, s, n, MEMO_WARN)
477 static void __m_free(m_pool_s *mp, void *ptr, int size, char *name)
479 if (DEBUG_FLAGS & DEBUG_ALLOC)
480 printk ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
482 ___m_free(mp, ptr, size);
487 * With pci bus iommu support, we use a default pool of unmapped memory
488 * for memory we do not need to DMA from/to and one pool per pcidev for
489 * memory accessed by the PCI chip. `mp0' is the default non-DMAable pool.
492 static m_addr_t ___mp0_getp(m_pool_s *mp)
494 m_addr_t m = GetPages();
495 if (m)
496 ++mp->nump;
497 return m;
500 static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
502 FreePages(m);
503 --mp->nump;
506 static m_pool_s mp0 = {NULL, ___mp0_getp, ___mp0_freep};
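/*
**	Illustrative sketch (disabled, not from the original code): allocating
**	from the default (non-DMA) pool above. Chunks come back zeroed and
**	aligned on their rounded-up power-of-two size (128 bytes here).
**	Real callers run under NCR_LOCK_DRIVER(); locking is elided.
*/
#if 0
static void example_pool_usage(void)
{
	void *p;

	p = __m_calloc(&mp0, 128, "EXAMPLE");
	if (p)
		__m_free(&mp0, p, 128, "EXAMPLE");
}
#endif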
509 * DMAable pools.
513 * With pci bus iommu support, we maintain one pool per pcidev and a
514 * hashed reverse table for virtual to bus physical address translations.
516 static m_addr_t ___dma_getp(m_pool_s *mp)
518 m_addr_t vp;
519 m_vtob_s *vbp;
521 vbp = __m_calloc(&mp0, sizeof(*vbp), "VTOB");
522 if (vbp) {
523 dma_addr_t daddr;
524 vp = (m_addr_t) dma_alloc_coherent(mp->bush,
525 PAGE_SIZE<<MEMO_PAGE_ORDER,
526 &daddr, GFP_ATOMIC);
527 if (vp) {
528 int hc = VTOB_HASH_CODE(vp);
529 vbp->vaddr = vp;
530 vbp->baddr = daddr;
531 vbp->next = mp->vtob[hc];
532 mp->vtob[hc] = vbp;
533 ++mp->nump;
534 return vp;
537 if (vbp)
538 __m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
539 return 0;
542 static void ___dma_freep(m_pool_s *mp, m_addr_t m)
544 m_vtob_s **vbpp, *vbp;
545 int hc = VTOB_HASH_CODE(m);
547 vbpp = &mp->vtob[hc];
548 while (*vbpp && (*vbpp)->vaddr != m)
549 vbpp = &(*vbpp)->next;
550 if (*vbpp) {
551 vbp = *vbpp;
552 *vbpp = (*vbpp)->next;
553 dma_free_coherent(mp->bush, PAGE_SIZE<<MEMO_PAGE_ORDER,
554 (void *)vbp->vaddr, (dma_addr_t)vbp->baddr);
555 __m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
556 --mp->nump;
560 static inline m_pool_s *___get_dma_pool(m_bush_t bush)
562 m_pool_s *mp;
563 for (mp = mp0.next; mp && mp->bush != bush; mp = mp->next);
564 return mp;
567 static m_pool_s *___cre_dma_pool(m_bush_t bush)
569 m_pool_s *mp;
570 mp = __m_calloc(&mp0, sizeof(*mp), "MPOOL");
571 if (mp) {
572 bzero(mp, sizeof(*mp));
573 mp->bush = bush;
574 mp->getp = ___dma_getp;
575 mp->freep = ___dma_freep;
576 mp->next = mp0.next;
577 mp0.next = mp;
579 return mp;
582 static void ___del_dma_pool(m_pool_s *p)
584 struct m_pool **pp = &mp0.next;
586 while (*pp && *pp != p)
587 pp = &(*pp)->next;
588 if (*pp) {
589 *pp = (*pp)->next;
590 __m_free(&mp0, p, sizeof(*p), "MPOOL");
594 static void *__m_calloc_dma(m_bush_t bush, int size, char *name)
596 u_long flags;
597 struct m_pool *mp;
598 void *m = NULL;
600 NCR_LOCK_DRIVER(flags);
601 mp = ___get_dma_pool(bush);
602 if (!mp)
603 mp = ___cre_dma_pool(bush);
604 if (mp)
605 m = __m_calloc(mp, size, name);
606 if (mp && !mp->nump)
607 ___del_dma_pool(mp);
608 NCR_UNLOCK_DRIVER(flags);
610 return m;
613 static void __m_free_dma(m_bush_t bush, void *m, int size, char *name)
615 u_long flags;
616 struct m_pool *mp;
618 NCR_LOCK_DRIVER(flags);
619 mp = ___get_dma_pool(bush);
620 if (mp)
621 __m_free(mp, m, size, name);
622 if (mp && !mp->nump)
623 ___del_dma_pool(mp);
624 NCR_UNLOCK_DRIVER(flags);
627 static m_addr_t __vtobus(m_bush_t bush, void *m)
629 u_long flags;
630 m_pool_s *mp;
631 int hc = VTOB_HASH_CODE(m);
632 m_vtob_s *vp = NULL;
633 m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
635 NCR_LOCK_DRIVER(flags);
636 mp = ___get_dma_pool(bush);
637 if (mp) {
638 vp = mp->vtob[hc];
639 while (vp && (m_addr_t) vp->vaddr != a)
640 vp = vp->next;
642 NCR_UNLOCK_DRIVER(flags);
643 return vp ? vp->baddr + (((m_addr_t) m) - a) : 0;
646 #define _m_calloc_dma(np, s, n) __m_calloc_dma(np->dev, s, n)
647 #define _m_free_dma(np, p, s, n) __m_free_dma(np->dev, p, s, n)
648 #define m_calloc_dma(s, n) _m_calloc_dma(np, s, n)
649 #define m_free_dma(p, s, n) _m_free_dma(np, p, s, n)
650 #define _vtobus(np, p) __vtobus(np->dev, p)
651 #define vtobus(p) _vtobus(np, p)
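/*
**	Illustrative sketch (disabled, not from the original code): grab a
**	DMAable chunk for a given device and translate its virtual address
**	to the bus address the chip must be given. The 64 byte size and the
**	"EXAMPLE" tag are arbitrary.
*/
#if 0
static void *example_dma_alloc(struct device *dev)
{
	void *p;

	p = __m_calloc_dma(dev, 64, "EXAMPLE");
	if (p) {
		m_addr_t baddr = __vtobus(dev, p);
		/* ... store baddr into a SCRIPTS data structure ... */
		(void) baddr;
	}
	return p;
}

static void example_dma_free(struct device *dev, void *p)
{
	__m_free_dma(dev, p, 64, "EXAMPLE");
}
#endif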
654 * Deal with DMA mapping/unmapping.
657 /* To keep track of the dma mapping (sg/single) that has been set */
658 #define __data_mapped SCp.phase
659 #define __data_mapping SCp.have_data_in
661 static void __unmap_scsi_data(struct device *dev, Scsi_Cmnd *cmd)
663 enum dma_data_direction dma_dir =
664 (enum dma_data_direction)scsi_to_pci_dma_dir(cmd->sc_data_direction);
666 switch(cmd->__data_mapped) {
667 case 2:
668 dma_unmap_sg(dev, cmd->buffer, cmd->use_sg, dma_dir);
669 break;
670 case 1:
671 dma_unmap_single(dev, cmd->__data_mapping,
672 cmd->request_bufflen, dma_dir);
673 break;
675 cmd->__data_mapped = 0;
678 static u_long __map_scsi_single_data(struct device *dev, Scsi_Cmnd *cmd)
680 dma_addr_t mapping;
681 enum dma_data_direction dma_dir =
682 (enum dma_data_direction)scsi_to_pci_dma_dir(cmd->sc_data_direction);
685 if (cmd->request_bufflen == 0)
686 return 0;
688 mapping = dma_map_single(dev, cmd->request_buffer,
689 cmd->request_bufflen, dma_dir);
690 cmd->__data_mapped = 1;
691 cmd->__data_mapping = mapping;
693 return mapping;
696 static int __map_scsi_sg_data(struct device *dev, Scsi_Cmnd *cmd)
698 int use_sg;
699 enum dma_data_direction dma_dir =
700 (enum dma_data_direction)scsi_to_pci_dma_dir(cmd->sc_data_direction);
702 if (cmd->use_sg == 0)
703 return 0;
705 use_sg = dma_map_sg(dev, cmd->buffer, cmd->use_sg, dma_dir);
706 cmd->__data_mapped = 2;
707 cmd->__data_mapping = use_sg;
709 return use_sg;
712 static void __sync_scsi_data_for_cpu(struct device *dev, Scsi_Cmnd *cmd)
714 enum dma_data_direction dma_dir =
715 (enum dma_data_direction)scsi_to_pci_dma_dir(cmd->sc_data_direction);
717 switch(cmd->__data_mapped) {
718 case 2:
719 dma_sync_sg_for_cpu(dev, cmd->buffer, cmd->use_sg, dma_dir);
720 break;
721 case 1:
722 dma_sync_single_for_cpu(dev, cmd->__data_mapping,
723 cmd->request_bufflen, dma_dir);
724 break;
728 static void __sync_scsi_data_for_device(struct device *dev, Scsi_Cmnd *cmd)
730 enum dma_data_direction dma_dir =
731 (enum dma_data_direction)scsi_to_pci_dma_dir(cmd->sc_data_direction);
733 switch(cmd->__data_mapped) {
734 case 2:
735 dma_sync_sg_for_device(dev, cmd->buffer, cmd->use_sg, dma_dir);
736 break;
737 case 1:
738 dma_sync_single_for_device(dev, cmd->__data_mapping,
739 cmd->request_bufflen, dma_dir);
740 break;
744 #define scsi_sg_dma_address(sc) sg_dma_address(sc)
745 #define scsi_sg_dma_len(sc) sg_dma_len(sc)
747 #define unmap_scsi_data(np, cmd) __unmap_scsi_data(np->dev, cmd)
748 #define map_scsi_single_data(np, cmd) __map_scsi_single_data(np->dev, cmd)
749 #define map_scsi_sg_data(np, cmd) __map_scsi_sg_data(np->dev, cmd)
750 #define sync_scsi_data_for_cpu(np, cmd) __sync_scsi_data_for_cpu(np->dev, cmd)
751 #define sync_scsi_data_for_device(np, cmd) __sync_scsi_data_for_device(np->dev, cmd)
753 #define scsi_data_direction(cmd) (cmd->sc_data_direction)
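/*
**	Illustrative sketch (disabled, not from the original code): life
**	cycle of a single-buffer DMA mapping using the helpers above, with
**	'dev' being the controller's struct device and 'cmd' the command.
*/
#if 0
static void example_map_single(struct device *dev, Scsi_Cmnd *cmd)
{
	u_long baddr;

	baddr = __map_scsi_single_data(dev, cmd);
	if (baddr) {
		/* ... program the chip with baddr and run the transfer ... */

		/* CPU looks at the data while it is still mapped: */
		__sync_scsi_data_for_cpu(dev, cmd);
		/* ... possibly modifies it, then gives it back to the chip: */
		__sync_scsi_data_for_device(dev, cmd);

		/* done with the buffer */
		__unmap_scsi_data(dev, cmd);
	}
}
#endif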
755 /*==========================================================
757 ** Driver setup.
759 ** This structure is initialized from linux config
760 ** options. It can be overridden at boot-up by the boot
761 ** command line.
763 **==========================================================
765 static struct ncr_driver_setup
766 driver_setup = SCSI_NCR_DRIVER_SETUP;
768 #ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
769 static struct ncr_driver_setup
770 driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP;
771 #endif
773 #define initverbose (driver_setup.verbose)
774 #define bootverbose (np->verbose)
777 /*===================================================================
779 ** Utility routines that properly return data through /proc FS.
781 **===================================================================
783 #ifdef SCSI_NCR_USER_INFO_SUPPORT
785 struct info_str
787 char *buffer;
788 int length;
789 int offset;
790 int pos;
793 static void copy_mem_info(struct info_str *info, char *data, int len)
795 if (info->pos + len > info->length)
796 len = info->length - info->pos;
798 if (info->pos + len < info->offset) {
799 info->pos += len;
800 return;
802 if (info->pos < info->offset) {
803 data += (info->offset - info->pos);
804 len -= (info->offset - info->pos);
807 if (len > 0) {
808 memcpy(info->buffer + info->pos, data, len);
809 info->pos += len;
813 static int copy_info(struct info_str *info, char *fmt, ...)
815 va_list args;
816 char buf[81];
817 int len;
819 va_start(args, fmt);
820 len = vsprintf(buf, fmt, args);
821 va_end(args);
823 copy_mem_info(info, buf, len);
824 return len;
827 #endif
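/*
**	Illustrative sketch (disabled, not from the original code, and only
**	meaningful when SCSI_NCR_USER_INFO_SUPPORT is defined): how a /proc
**	info handler fills the user supplied window with copy_info().
*/
#if 0
static int example_proc_info(char *buffer, int length, int offset)
{
	struct info_str info;

	info.buffer = buffer;
	info.length = length;
	info.offset = offset;
	info.pos    = 0;

	copy_info(&info, "revision %d\n", 42);

	return info.pos > info.offset ? info.pos - info.offset : 0;
}
#endif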
829 /*===================================================================
831 ** Driver setup from the boot command line
833 **===================================================================
836 #ifdef MODULE
837 #define ARG_SEP ' '
838 #else
839 #define ARG_SEP ','
840 #endif
842 #define OPT_TAGS 1
843 #define OPT_MASTER_PARITY 2
844 #define OPT_SCSI_PARITY 3
845 #define OPT_DISCONNECTION 4
846 #define OPT_SPECIAL_FEATURES 5
847 #define OPT_UNUSED_1 6
848 #define OPT_FORCE_SYNC_NEGO 7
849 #define OPT_REVERSE_PROBE 8
850 #define OPT_DEFAULT_SYNC 9
851 #define OPT_VERBOSE 10
852 #define OPT_DEBUG 11
853 #define OPT_BURST_MAX 12
854 #define OPT_LED_PIN 13
855 #define OPT_MAX_WIDE 14
856 #define OPT_SETTLE_DELAY 15
857 #define OPT_DIFF_SUPPORT 16
858 #define OPT_IRQM 17
859 #define OPT_PCI_FIX_UP 18
860 #define OPT_BUS_CHECK 19
861 #define OPT_OPTIMIZE 20
862 #define OPT_RECOVERY 21
863 #define OPT_SAFE_SETUP 22
864 #define OPT_USE_NVRAM 23
865 #define OPT_EXCLUDE 24
866 #define OPT_HOST_ID 25
868 #ifdef SCSI_NCR_IARB_SUPPORT
869 #define OPT_IARB 26
870 #endif
872 static char setup_token[] __initdata =
873 "tags:" "mpar:"
874 "spar:" "disc:"
875 "specf:" "ultra:"
876 "fsn:" "revprob:"
877 "sync:" "verb:"
878 "debug:" "burst:"
879 "led:" "wide:"
880 "settle:" "diff:"
881 "irqm:" "pcifix:"
882 "buschk:" "optim:"
883 "recovery:"
884 "safe:" "nvram:"
885 "excl:" "hostid:"
886 #ifdef SCSI_NCR_IARB_SUPPORT
887 "iarb:"
888 #endif
889 ; /* DO NOT REMOVE THIS ';' */
891 #ifdef MODULE
892 #define ARG_SEP ' '
893 #else
894 #define ARG_SEP ','
895 #endif
897 static int __init get_setup_token(char *p)
899 char *cur = setup_token;
900 char *pc;
901 int i = 0;
903 while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
904 ++pc;
905 ++i;
906 if (!strncmp(p, cur, pc - cur))
907 return i;
908 cur = pc;
910 return 0;
914 static int __init sym53c8xx__setup(char *str)
916 #ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
917 char *cur = str;
918 char *pc, *pv;
919 int i, val, c;
920 int xi = 0;
922 while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
923 char *pe;
925 val = 0;
926 pv = pc;
927 c = *++pv;
929 if (c == 'n')
930 val = 0;
931 else if (c == 'y')
932 val = 1;
933 else
934 val = (int) simple_strtoul(pv, &pe, 0);
936 switch (get_setup_token(cur)) {
937 case OPT_TAGS:
938 driver_setup.default_tags = val;
939 if (pe && *pe == '/') {
940 i = 0;
941 while (*pe && *pe != ARG_SEP &&
942 i < sizeof(driver_setup.tag_ctrl)-1) {
943 driver_setup.tag_ctrl[i++] = *pe++;
945 driver_setup.tag_ctrl[i] = '\0';
947 break;
948 case OPT_MASTER_PARITY:
949 driver_setup.master_parity = val;
950 break;
951 case OPT_SCSI_PARITY:
952 driver_setup.scsi_parity = val;
953 break;
954 case OPT_DISCONNECTION:
955 driver_setup.disconnection = val;
956 break;
957 case OPT_SPECIAL_FEATURES:
958 driver_setup.special_features = val;
959 break;
960 case OPT_FORCE_SYNC_NEGO:
961 driver_setup.force_sync_nego = val;
962 break;
963 case OPT_REVERSE_PROBE:
964 driver_setup.reverse_probe = val;
965 break;
966 case OPT_DEFAULT_SYNC:
967 driver_setup.default_sync = val;
968 break;
969 case OPT_VERBOSE:
970 driver_setup.verbose = val;
971 break;
972 case OPT_DEBUG:
973 driver_setup.debug = val;
974 break;
975 case OPT_BURST_MAX:
976 driver_setup.burst_max = val;
977 break;
978 case OPT_LED_PIN:
979 driver_setup.led_pin = val;
980 break;
981 case OPT_MAX_WIDE:
982 driver_setup.max_wide = val? 1:0;
983 break;
984 case OPT_SETTLE_DELAY:
985 driver_setup.settle_delay = val;
986 break;
987 case OPT_DIFF_SUPPORT:
988 driver_setup.diff_support = val;
989 break;
990 case OPT_IRQM:
991 driver_setup.irqm = val;
992 break;
993 case OPT_PCI_FIX_UP:
994 driver_setup.pci_fix_up = val;
995 break;
996 case OPT_BUS_CHECK:
997 driver_setup.bus_check = val;
998 break;
999 case OPT_OPTIMIZE:
1000 driver_setup.optimize = val;
1001 break;
1002 case OPT_RECOVERY:
1003 driver_setup.recovery = val;
1004 break;
1005 case OPT_USE_NVRAM:
1006 driver_setup.use_nvram = val;
1007 break;
1008 case OPT_SAFE_SETUP:
1009 memcpy(&driver_setup, &driver_safe_setup,
1010 sizeof(driver_setup));
1011 break;
1012 case OPT_EXCLUDE:
1013 if (xi < SCSI_NCR_MAX_EXCLUDES)
1014 driver_setup.excludes[xi++] = val;
1015 break;
1016 case OPT_HOST_ID:
1017 driver_setup.host_id = val;
1018 break;
1019 #ifdef SCSI_NCR_IARB_SUPPORT
1020 case OPT_IARB:
1021 driver_setup.iarb = val;
1022 break;
1023 #endif
1024 default:
1025 printk("sym53c8xx_setup: unexpected boot option '%.*s' ignored\n", (int)(pc-cur+1), cur);
1026 break;
1029 if ((cur = strchr(cur, ARG_SEP)) != NULL)
1030 ++cur;
1031 }
1032 #endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
1033 return 1;
1034 }
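/*
**	Illustrative sketch (disabled, not from the original code): an option
**	string as parsed by sym53c8xx__setup() above. On the kernel command
**	line the options are separated by ',' while a module build uses ' '
**	(see ARG_SEP). The values below are arbitrary.
*/
#if 0
static int __init example_setup_usage(void)
{
	/*
	** 16 tags by default, but only 4 for target 2 of controller 0
	** (the "/t2q4" part is copied into driver_setup.tag_ctrl and is
	** consulted later by device_queue_depth()), burst code 6 and
	** verbose messages enabled.
	*/
	return sym53c8xx__setup("tags:16/t2q4,burst:6,verb:1");
}
#endif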
1036 /*===================================================================
1038 ** Get device queue depth from boot command line.
1040 **===================================================================
1042 #define DEF_DEPTH (driver_setup.default_tags)
1043 #define ALL_TARGETS -2
1044 #define NO_TARGET -1
1045 #define ALL_LUNS -2
1046 #define NO_LUN -1
1048 static int device_queue_depth(int unit, int target, int lun)
1050 int c, h, t, u, v;
1051 char *p = driver_setup.tag_ctrl;
1052 char *ep;
1054 h = -1;
1055 t = NO_TARGET;
1056 u = NO_LUN;
1057 while ((c = *p++) != 0) {
1058 v = simple_strtoul(p, &ep, 0);
1059 switch(c) {
1060 case '/':
1061 ++h;
1062 t = ALL_TARGETS;
1063 u = ALL_LUNS;
1064 break;
1065 case 't':
1066 if (t != target)
1067 t = (target == v) ? v : NO_TARGET;
1068 u = ALL_LUNS;
1069 break;
1070 case 'u':
1071 if (u != lun)
1072 u = (lun == v) ? v : NO_LUN;
1073 break;
1074 case 'q':
1075 if (h == unit &&
1076 (t == ALL_TARGETS || t == target) &&
1077 (u == ALL_LUNS || u == lun))
1078 return v;
1079 break;
1080 case '-':
1081 t = ALL_TARGETS;
1082 u = ALL_LUNS;
1083 break;
1084 default:
1085 break;
1087 p = ep;
1089 return DEF_DEPTH;