#ifndef __ASM_SH64_IO_H
#define __ASM_SH64_IO_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/io.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 */

/*
 * Convention:
 *    read{b,w,l}/write{b,w,l} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA.
 * These may (will) be platform-specific functions.
 *
 * In addition, we have
 *    ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH-specific I/O,
 * which are processor-specific. The address should be the result of
 * onchip_remap().
 */

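/*
 * Illustrative usage (hypothetical driver code, not part of this header):
 * a PCI/MMIO register is read with readb(), an ISA port is written with
 * outb(), and an on-chip SuperH register goes through ctrl_outw().
 * "mmio_base" is assumed to come from ioremap() and "csr_base" from
 * onchip_remap(); the port number and offsets are made up.
 *
 *	unsigned char id = readb(mmio_base + 0x00);
 *	outb(0x01, 0x3f8);
 *	ctrl_outw(0x8000, csr_base + 0x04);
 */
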
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/page.h>		/* for __pa()/__va() used below */

#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#define page_to_bus page_to_phys

/*
 * Nothing overly special here.. instead of doing the same thing
 * over and over again, we just define a set of sh64_in/out functions
 * with an implicit size. The traditional read{b,w,l}/write{b,w,l}
 * mess is wrapped to this, as are the SH-specific ctrl_in/out routines.
 */
static inline unsigned char sh64_in8(unsigned long addr)
{
	return *(volatile unsigned char *)addr;
}

static inline unsigned short sh64_in16(unsigned long addr)
{
	return *(volatile unsigned short *)addr;
}

static inline unsigned long sh64_in32(unsigned long addr)
{
	return *(volatile unsigned long *)addr;
}

static inline unsigned long long sh64_in64(unsigned long addr)
{
	return *(volatile unsigned long long *)addr;
}

static inline void sh64_out8(unsigned char b, unsigned long addr)
{
	*(volatile unsigned char *)addr = b;
	wmb();		/* write barrier: keep device stores in program order */
}

static inline void sh64_out16(unsigned short b, unsigned long addr)
{
	*(volatile unsigned short *)addr = b;
	wmb();
}

static inline void sh64_out32(unsigned long b, unsigned long addr)
{
	*(volatile unsigned long *)addr = b;
	wmb();
}

static inline void sh64_out64(unsigned long long b, unsigned long addr)
{
	*(volatile unsigned long long *)addr = b;
	wmb();
}

#define readb(addr)		sh64_in8(addr)
#define readw(addr)		sh64_in16(addr)
#define readl(addr)		sh64_in32(addr)

#define writeb(b, addr)		sh64_out8(b, addr)
#define writew(b, addr)		sh64_out16(b, addr)
#define writel(b, addr)		sh64_out32(b, addr)

#define ctrl_inb(addr)		sh64_in8(addr)
#define ctrl_inw(addr)		sh64_in16(addr)
#define ctrl_inl(addr)		sh64_in32(addr)

#define ctrl_outb(b, addr)	sh64_out8(b, addr)
#define ctrl_outw(b, addr)	sh64_out16(b, addr)
#define ctrl_outl(b, addr)	sh64_out32(b, addr)

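/*
 * The wrappers expand to plain volatile accesses, so a read-modify-write
 * is just (illustrative; "reg" is a hypothetical register address):
 *
 *	unsigned long v = readl(reg);
 *	writel(v | 0x1, reg);
 */
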
unsigned long inb(unsigned long port);
unsigned long inw(unsigned long port);
unsigned long inl(unsigned long port);
void outb(unsigned long value, unsigned long port);
void outw(unsigned long value, unsigned long port);
void outl(unsigned long value, unsigned long port);

#ifdef __KERNEL__

#ifdef CONFIG_SH_CAYMAN
extern unsigned long smsc_superio_virt;
#endif
#ifdef CONFIG_PCI
extern unsigned long pciio_virt;
#endif

#define IO_SPACE_LIMIT 0xffffffff

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are trivial on the 1:1 Linux/SuperH mapping.
 */
extern __inline__ unsigned long virt_to_phys(volatile void * address)
{
	return __pa(address);
}

extern __inline__ void * phys_to_virt(unsigned long address)
{
	return __va(address);
}

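/*
 * Consistency check (illustrative, not part of the original API): because
 * the mapping is 1:1, converting back and forth is the identity for any
 * directly-mapped kernel pointer p:
 *
 *	phys_to_virt(virt_to_phys(p)) == p
 *
 * This does not hold for ioremap()ed addresses.
 */
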
extern void * __ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags);

extern __inline__ void * ioremap(unsigned long phys_addr, unsigned long size)
{
	/* the final __ioremap() argument selects cacheability: 1 = cached */
	return __ioremap(phys_addr, size, 1);
}

extern __inline__ void * ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	/* 0 = uncached, which is what device registers normally want */
	return __ioremap(phys_addr, size, 0);
}

extern void iounmap(void *addr);

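/*
 * Typical use (hypothetical device, illustrative only): map a register
 * window, access it through the readl()/writel() wrappers above, then
 * tear the mapping down. DEV_PHYS_BASE, DEV_REG_SIZE and the 0x10 offset
 * are made-up names for the example.
 *
 *	void *regs = ioremap_nocache(DEV_PHYS_BASE, DEV_REG_SIZE);
 *	writel(0x1, (unsigned long)regs + 0x10);
 *	iounmap(regs);
 */
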
unsigned long onchip_remap(unsigned long addr, unsigned long size, const char* name);
extern void onchip_unmap(unsigned long vaddr);

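/*
 * As the convention at the top of this file notes, the ctrl_in/out
 * routines expect an address produced by onchip_remap(). A sketch
 * (the base address, size, and offset are made up):
 *
 *	unsigned long base = onchip_remap(0x01000000, 1024, "EXAMPLE");
 *	ctrl_outb(0x01, base + 0x04);
 *	onchip_unmap(base);
 */
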
static __inline__ int check_signature(unsigned long io_addr,
			const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}

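/*
 * Example (illustrative only): probe for a hypothetical adapter by
 * comparing a known byte pattern against its mapped ROM; "rom_base"
 * is assumed to be an already-mapped I/O address and
 * register_the_device() a made-up helper.
 *
 *	static const unsigned char sig[] = { 0x55, 0xaa };
 *
 *	if (check_signature(rom_base, sig, sizeof(sig)))
 *		register_the_device();
 */
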
/*
 * The caches on some architectures aren't dma-coherent and need to
 * handle this in software. There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches. Dirty lines of the caches may be written back or simply
 *    be discarded. This operation is necessary before dma operations
 *    to the memory.
 *  - dma_cache_wback(start, size) writes back any dirty lines but does
 *    not invalidate the cache. This can be used before DMA reads from
 *    memory.
 */

static __inline__ void dma_cache_wback_inv(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbp %0, 0" : : "r" (s));	/* purge: write back and invalidate */
}

static __inline__ void dma_cache_inv(unsigned long start, unsigned long size)
{
	/*
	 * Note that the caller has to be careful with overzealous
	 * invalidation should there be partial cache lines at the
	 * extremities of the specified range.
	 */
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbi %0, 0" : : "r" (s));	/* invalidate cache block */
}

static __inline__ void dma_cache_wback(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbwb %0, 0" : : "r" (s));	/* write back cache block */
}

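/*
 * Putting these together (illustrative only; "buf" and "len" stand for a
 * hypothetical DMA buffer): invalidate before the device writes to memory,
 * write back before the device reads from it, or do both for bidirectional
 * transfers.
 *
 *	dma_cache_inv((unsigned long)buf, len);
 *	dma_cache_wback((unsigned long)buf, len);
 *	dma_cache_wback_inv((unsigned long)buf, len);
 */
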
#endif /* __KERNEL__ */
#endif /* __ASM_SH64_IO_H */