Import 2.3.18pre1
[davej-history.git] / include / asm-sparc64 / io.h
blob254babf91f99d320577c8c3f301a36a7c527a761
1 /* $Id: io.h,v 1.24 1999/09/06 01:17:54 davem Exp $ */
2 #ifndef __SPARC64_IO_H
3 #define __SPARC64_IO_H
5 #include <linux/kernel.h>
6 #include <linux/types.h>
8 #include <asm/page.h> /* IO address mapping routines need this */
9 #include <asm/system.h>
10 #include <asm/asi.h>
12 /* PC crapola... */
13 #define __SLOW_DOWN_IO do { } while (0)
14 #define SLOW_DOWN_IO do { } while (0)
17 #define PCI_DVMA_HASHSZ 256
19 extern unsigned long pci_dvma_v2p_hash[PCI_DVMA_HASHSZ];
20 extern unsigned long pci_dvma_p2v_hash[PCI_DVMA_HASHSZ];
22 #define pci_dvma_ahashfn(addr) (((addr) >> 24) & 0xff)
24 extern __inline__ unsigned long virt_to_phys(volatile void *addr)
26 unsigned long vaddr = (unsigned long)addr;
27 unsigned long off;
29 /* Handle kernel variable pointers... */
30 if (vaddr < PAGE_OFFSET)
31 vaddr += PAGE_OFFSET - (unsigned long)&empty_zero_page;
33 off = pci_dvma_v2p_hash[pci_dvma_ahashfn(vaddr - PAGE_OFFSET)];
34 return vaddr + off;
37 extern __inline__ void *phys_to_virt(unsigned long addr)
39 unsigned long paddr = addr & 0xffffffffUL;
40 unsigned long off;
42 off = pci_dvma_p2v_hash[pci_dvma_ahashfn(paddr)];
43 return (void *)(paddr + off);
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/* Different PCI controllers we support have their PCI MEM space
 * mapped to an either 2GB (Psycho) or 4GB (Sabre) aligned area,
 * so need to chop off the top 33 or 32 bits.
 */
extern unsigned long pci_memspace_mask;

#define bus_dvma_to_mem(__vaddr) ((__vaddr) & pci_memspace_mask)
57 extern __inline__ unsigned int inb(unsigned long addr)
59 unsigned int ret;
61 __asm__ __volatile__("lduba [%1] %2, %0"
62 : "=r" (ret)
63 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
65 return ret;
68 extern __inline__ unsigned int inw(unsigned long addr)
70 unsigned int ret;
72 __asm__ __volatile__("lduha [%1] %2, %0"
73 : "=r" (ret)
74 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
76 return ret;
79 extern __inline__ unsigned int inl(unsigned long addr)
81 unsigned int ret;
83 __asm__ __volatile__("lduwa [%1] %2, %0"
84 : "=r" (ret)
85 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
87 return ret;
90 extern __inline__ void outb(unsigned char b, unsigned long addr)
92 __asm__ __volatile__("stba %0, [%1] %2"
93 : /* no outputs */
94 : "r" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
97 extern __inline__ void outw(unsigned short w, unsigned long addr)
99 __asm__ __volatile__("stha %0, [%1] %2"
100 : /* no outputs */
101 : "r" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
104 extern __inline__ void outl(unsigned int l, unsigned long addr)
106 __asm__ __volatile__("stwa %0, [%1] %2"
107 : /* no outputs */
108 : "r" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
/* The "pausing" variants need no delay on Ultra. */
#define inb_p inb
#define outb_p outb

/* String port I/O, implemented out of line in arch code. */
extern void outsb(unsigned long addr, const void *src, unsigned long count);
extern void outsw(unsigned long addr, const void *src, unsigned long count);
extern void outsl(unsigned long addr, const void *src, unsigned long count);
extern void insb(unsigned long addr, void *dst, unsigned long count);
extern void insw(unsigned long addr, void *dst, unsigned long count);
extern void insl(unsigned long addr, void *dst, unsigned long count);
121 /* Memory functions, same as I/O accesses on Ultra. */
122 extern __inline__ unsigned int _readb(unsigned long addr)
124 unsigned int ret;
126 __asm__ __volatile__("lduba [%1] %2, %0"
127 : "=r" (ret)
128 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
130 return ret;
133 extern __inline__ unsigned int _readw(unsigned long addr)
135 unsigned int ret;
137 __asm__ __volatile__("lduha [%1] %2, %0"
138 : "=r" (ret)
139 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
141 return ret;
144 extern __inline__ unsigned int _readl(unsigned long addr)
146 unsigned int ret;
148 __asm__ __volatile__("lduwa [%1] %2, %0"
149 : "=r" (ret)
150 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
152 return ret;
155 extern __inline__ void _writeb(unsigned char b, unsigned long addr)
157 __asm__ __volatile__("stba %0, [%1] %2"
158 : /* no outputs */
159 : "r" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
162 extern __inline__ void _writew(unsigned short w, unsigned long addr)
164 __asm__ __volatile__("stha %0, [%1] %2"
165 : /* no outputs */
166 : "r" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
169 extern __inline__ void _writel(unsigned int l, unsigned long addr)
171 __asm__ __volatile__("stwa %0, [%1] %2"
172 : /* no outputs */
173 : "r" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
/* Public MMIO accessors: accept pointer or integral addresses by
 * casting through unsigned long before calling the _ helpers.
 */
#define readb(__addr)		(_readb((unsigned long)(__addr)))
#define readw(__addr)		(_readw((unsigned long)(__addr)))
#define readl(__addr)		(_readl((unsigned long)(__addr)))
#define writeb(__b, __addr)	(_writeb((__b), (unsigned long)(__addr)))
#define writew(__w, __addr)	(_writew((__w), (unsigned long)(__addr)))
#define writel(__l, __addr)	(_writel((__l), (unsigned long)(__addr)))
/*
 * Memcpy to/from I/O space is just a regular memory operation on
 * Ultra as well.
 *
 * FIXME: Write faster routines using ASI_*L for this.
 */
191 static inline void *
192 memset_io(void *dst, int c, __kernel_size_t n)
194 char *d = dst;
196 while (n--)
197 *d++ = c;
199 return dst;
202 static inline void *
203 memcpy_fromio(void *dst, const void *src, __kernel_size_t n)
205 const char *s = src;
206 char *d = dst;
208 while (n--)
209 *d++ = *s++;
211 return dst;
214 static inline void *
215 memcpy_toio(void *dst, const void *src, __kernel_size_t n)
217 const char *s = src;
218 char *d = dst;
220 while (n--)
221 *d++ = *s++;
223 return dst;
#if 0 /* XXX Not exactly, we need to use ASI_*L from/to the I/O end,
       * XXX so these are disabled until we code that stuff.
       */
#define eth_io_copy_and_sum(a,b,c,d)	eth_copy_and_sum((a),((char *)(b)),(c),(d))
#endif
/* Compare @length bytes of MMIO starting at @io_addr against
 * @signature.  Returns 1 on a complete match, 0 at the first
 * mismatching byte.  NOTE(review): @length of 0 is treated as a
 * full-length scan by the do/while — callers must pass length >= 1.
 */
static inline int check_signature(unsigned long io_addr,
				  const unsigned char *signature,
				  int length)
{
	int retval = 0;

	do {
		if (readb(io_addr++) != *signature++)
			goto out;
	} while (--length);
	retval = 1;
out:
	return retval;
}
/* On sparc64 we have the whole physical IO address space accessible
 * using physically addressed loads and stores, so this does nothing.
 */
#define ioremap(__offset, __size)	((void *)(__offset))
#define iounmap(__addr)			do { } while (0)
/* Low-level I/O mapping primitives, implemented in arch code. */
extern void sparc_ultra_mapioaddr(unsigned long physaddr,
				  unsigned long virt_addr,
				  int bus, int rdonly);
extern void sparc_ultra_unmapioaddr(unsigned long virt_addr);
/* Map physical I/O address @physaddr at kernel virtual @virt_addr. */
extern __inline__ void mapioaddr(unsigned long physaddr,
				 unsigned long virt_addr,
				 int bus, int rdonly)
{
	sparc_ultra_mapioaddr(physaddr, virt_addr, bus, rdonly);
}
/* Tear down a mapping established with mapioaddr(). */
extern __inline__ void unmapioaddr(unsigned long virt_addr)
{
	sparc_ultra_unmapioaddr(virt_addr);
}
269 extern void *sparc_alloc_io(u32 pa, void *va, int sz, char *name,
270 u32 io, int rdonly);
271 extern void sparc_free_io (void *va, int sz);
272 extern void *sparc_dvma_malloc (int sz, char *name, __u32 *dvma_addr);
274 /* Nothing to do */
276 #define dma_cache_inv(_start,_size) do { } while (0)
277 #define dma_cache_wback(_start,_size) do { } while (0)
278 #define dma_cache_wback_inv(_start,_size) do { } while (0)
280 #endif /* !(__SPARC64_IO_H) */