/* $Id: io.h,v 1.24 1999/09/06 01:17:54 davem Exp $ */
#ifndef __SPARC64_IO_H
#define __SPARC64_IO_H

#include <linux/kernel.h>
#include <linux/types.h>

#include <asm/page.h>      /* IO address mapping routines need this */
#include <asm/system.h>

#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

#define PCI_DVMA_HASHSZ	256

extern unsigned long pci_dvma_v2p_hash[PCI_DVMA_HASHSZ];
extern unsigned long pci_dvma_p2v_hash[PCI_DVMA_HASHSZ];

#define pci_dvma_ahashfn(addr)	(((addr) >> 24) & 0xff)
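
/* Note: the hash function above selects one of 256 buckets, each
 * covering a 16MB chunk of the 32-bit DVMA space ((addr >> 24) & 0xff).
 * Each bucket holds the offset that converts addresses in that chunk
 * between the kernel-virtual and DVMA views; the tables themselves are
 * presumably filled in by the PCI controller probe code.
 */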

extern __inline__ unsigned long virt_to_phys(volatile void *addr)
{
	unsigned long vaddr = (unsigned long)addr;
	unsigned long off;

	/* Handle kernel variable pointers... */
	if (vaddr < PAGE_OFFSET)
		vaddr += PAGE_OFFSET - (unsigned long)&empty_zero_page;

	off = pci_dvma_v2p_hash[pci_dvma_ahashfn(vaddr - PAGE_OFFSET)];
	return vaddr + off;
}

extern __inline__ void *phys_to_virt(unsigned long addr)
{
	unsigned long paddr = addr & 0xffffffffUL;
	unsigned long off;

	off = pci_dvma_p2v_hash[pci_dvma_ahashfn(paddr)];
	return (void *)(paddr + off);
}

#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/* Different PCI controllers we support have their PCI MEM space
 * mapped to an either 2GB (Psycho) or 4GB (Sabre) aligned area,
 * so we need to chop off the top 33 or 32 bits.
 */
extern unsigned long pci_memspace_mask;

#define bus_dvma_to_mem(__vaddr) ((__vaddr) & pci_memspace_mask)
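
/* For illustration (not from this header): a Psycho controller with
 * its PCI MEM space on a 2GB boundary would set pci_memspace_mask to
 * 0x7fffffffUL, so bus_dvma_to_mem() keeps the low 31 bits of the
 * address and discards the alignment bits above them.
 */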

extern __inline__ unsigned int inb(unsigned long addr)
{
	unsigned int ret;

	__asm__ __volatile__("lduba [%1] %2, %0"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));

	return ret;
}

extern __inline__ unsigned int inw(unsigned long addr)
{
	unsigned int ret;

	__asm__ __volatile__("lduha [%1] %2, %0"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));

	return ret;
}

extern __inline__ unsigned int inl(unsigned long addr)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa [%1] %2, %0"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));

	return ret;
}

extern __inline__ void outb(unsigned char b, unsigned long addr)
{
	__asm__ __volatile__("stba %0, [%1] %2"
			     : /* no outputs */
			     : "r" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
}

extern __inline__ void outw(unsigned short w, unsigned long addr)
{
	__asm__ __volatile__("stha %0, [%1] %2"
			     : /* no outputs */
			     : "r" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
}

extern __inline__ void outl(unsigned int l, unsigned long addr)
{
	__asm__ __volatile__("stwa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
}
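
/* Note on the accessors above: ASI_PHYS_BYPASS_EC_E_L is the UltraSPARC
 * "physical address, bypass, side-effect, little-endian" alternate
 * space, so each load/store goes straight to the physical I/O address
 * and is byte-swapped in hardware to match PCI's little-endian
 * ordering; callers need no explicit swab.
 */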

extern void outsb(unsigned long addr, const void *src, unsigned long count);
extern void outsw(unsigned long addr, const void *src, unsigned long count);
extern void outsl(unsigned long addr, const void *src, unsigned long count);
extern void insb(unsigned long addr, void *dst, unsigned long count);
extern void insw(unsigned long addr, void *dst, unsigned long count);
extern void insl(unsigned long addr, void *dst, unsigned long count);
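
/* Typical use of the string routines (hypothetical driver code, for
 * illustration only): an IDE-style driver would drain a 512-byte data
 * port sector with something like
 *
 *	insw(io_base + DATA_REG, buffer, 256);
 *
 * where io_base, DATA_REG and buffer are the driver's own names.
 */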

/* Memory functions, same as I/O accesses on Ultra. */
extern __inline__ unsigned int _readb(unsigned long addr)
{
	unsigned int ret;

	__asm__ __volatile__("lduba [%1] %2, %0"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));

	return ret;
}

extern __inline__ unsigned int _readw(unsigned long addr)
{
	unsigned int ret;

	__asm__ __volatile__("lduha [%1] %2, %0"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));

	return ret;
}

extern __inline__ unsigned int _readl(unsigned long addr)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa [%1] %2, %0"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));

	return ret;
}

extern __inline__ void _writeb(unsigned char b, unsigned long addr)
{
	__asm__ __volatile__("stba %0, [%1] %2"
			     : /* no outputs */
			     : "r" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
}

extern __inline__ void _writew(unsigned short w, unsigned long addr)
{
	__asm__ __volatile__("stha %0, [%1] %2"
			     : /* no outputs */
			     : "r" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
}

extern __inline__ void _writel(unsigned int l, unsigned long addr)
{
	__asm__ __volatile__("stwa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
}

#define readb(__addr)		(_readb((unsigned long)(__addr)))
#define readw(__addr)		(_readw((unsigned long)(__addr)))
#define readl(__addr)		(_readl((unsigned long)(__addr)))
#define writeb(__b, __addr)	(_writeb((__b), (unsigned long)(__addr)))
#define writew(__w, __addr)	(_writew((__w), (unsigned long)(__addr)))
#define writel(__l, __addr)	(_writel((__l), (unsigned long)(__addr)))
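
/* Usage sketch (hypothetical names, for illustration): since ioremap()
 * below is the identity mapping on sparc64, a driver can do
 *
 *	unsigned char *regs = ioremap(phys_base, REG_SPACE_SIZE);
 *	unsigned char status = readb(regs + STATUS_OFF);
 *	writeb(CMD_RESET, regs + CMD_OFF);
 *
 * phys_base, REG_SPACE_SIZE, STATUS_OFF, CMD_OFF and CMD_RESET are
 * made-up example values.
 */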

/* Memcpy to/from I/O space is just a regular memory operation on
 * Ultra as well.
 *
 * FIXME: Write faster routines using ASI_*L for this.
 */
extern __inline__ void *
memset_io(void *dst, int c, __kernel_size_t n)
{
	char *d = dst;

	while (n--)
		*d++ = c;

	return dst;
}

extern __inline__ void *
memcpy_fromio(void *dst, const void *src, __kernel_size_t n)
{
	const char *s = src;
	char *d = dst;

	while (n--)
		*d++ = *s++;

	return dst;
}

extern __inline__ void *
memcpy_toio(void *dst, const void *src, __kernel_size_t n)
{
	const char *s = src;
	char *d = dst;

	while (n--)
		*d++ = *s++;

	return dst;
}

#if 0 /* XXX Not exactly, we need to use ASI_*L from/to the I/O end,
       * XXX so these are disabled until we code that stuff.
       */
#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),((char *)(b)),(c),(d))
#endif

static inline int check_signature(unsigned long io_addr,
				  const unsigned char *signature,
				  int length)
{
	int retval = 0;

	do {
		if (readb(io_addr++) != *signature++)
			goto out;
	} while (--length);
	retval = 1;
out:
	return retval;
}
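
/* check_signature() is the usual legacy-hardware probe helper, e.g.
 * (hypothetical address and string, for illustration only):
 *
 *	static const unsigned char sig[] = "FOOCARD";
 *
 *	if (check_signature(rom_base, sig, sizeof(sig) - 1))
 *		... the adapter is present ...
 */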

/* On sparc64 we have the whole physical IO address space accessible
 * using physically addressed loads and stores, so this does nothing.
 */
#define ioremap(__offset, __size)	((void *)(__offset))
#define iounmap(__addr)			do { } while(0)

extern void sparc_ultra_mapioaddr(unsigned long physaddr,
				  unsigned long virt_addr,
				  int bus, int rdonly);
extern void sparc_ultra_unmapioaddr(unsigned long virt_addr);

extern __inline__ void mapioaddr(unsigned long physaddr,
				 unsigned long virt_addr,
				 int bus, int rdonly)
{
	sparc_ultra_mapioaddr(physaddr, virt_addr, bus, rdonly);
}

extern __inline__ void unmapioaddr(unsigned long virt_addr)
{
	sparc_ultra_unmapioaddr(virt_addr);
}

extern void *sparc_alloc_io(u32 pa, void *va, int sz, char *name,
			    u32 io, int rdonly);
extern void sparc_free_io(void *va, int sz);
extern void *sparc_dvma_malloc(int sz, char *name, __u32 *dvma_addr);
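
/* Illustrative only (made-up name and size): a driver wanting a
 * DMA-visible buffer might do
 *
 *	__u32 dvma;
 *	void *buf = sparc_dvma_malloc(PAGE_SIZE, "mydev", &dvma);
 *
 * and then hand `dvma' to the device while using `buf' from the CPU.
 */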

#define dma_cache_inv(_start,_size)		do { } while (0)
#define dma_cache_wback(_start,_size)		do { } while (0)
#define dma_cache_wback_inv(_start,_size)	do { } while (0)

#endif /* !(__SPARC64_IO_H) */