/* linux-2.6/verdex.git: include/asm-avr32/io.h */
#ifndef __ASM_AVR32_IO_H
#define __ASM_AVR32_IO_H

#include <linux/string.h>

#ifdef __KERNEL__

#include <asm/addrspace.h>
#include <asm/byteorder.h>
/* virt_to_phys will only work when address is in P1 or P2 */
static __inline__ unsigned long virt_to_phys(volatile void *address)
{
	return PHYSADDR(address);
}

static __inline__ void * phys_to_virt(unsigned long address)
{
	return (void *)P1SEGADDR(address);
}

#define cached_to_phys(addr)	((unsigned long)PHYSADDR(addr))
#define uncached_to_phys(addr)	((unsigned long)PHYSADDR(addr))
#define phys_to_cached(addr)	((void *)P1SEGADDR(addr))
#define phys_to_uncached(addr)	((void *)P2SEGADDR(addr))
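/*
 * Illustrative sketch (not part of the original header): how the P1/P2
 * segment translation above is typically used.  Assumes a buffer that
 * already lives in the P1 (cached) segment, e.g. one obtained from the
 * normal kernel allocator; the variable names are hypothetical.
 */
#if 0
static void example_addr_translation(void *kbuf)
{
	/* Physical address of a P1/P2 pointer (only valid there). */
	unsigned long phys = virt_to_phys(kbuf);

	/* Map the physical address back to a cached (P1) pointer... */
	void *cached_ptr = phys_to_virt(phys);

	/* ...or to an uncached (P2) alias of the same memory. */
	void *uncached_ptr = phys_to_uncached(phys);

	(void)cached_ptr;
	(void)uncached_ptr;
}
#endif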
/*
 * Generic IO read/write.  These perform native-endian accesses.  Note
 * that some architectures will want to re-define __raw_{read,write}w.
 */
extern void __raw_writesb(unsigned int addr, const void *data, int bytelen);
extern void __raw_writesw(unsigned int addr, const void *data, int wordlen);
extern void __raw_writesl(unsigned int addr, const void *data, int longlen);

extern void __raw_readsb(unsigned int addr, void *data, int bytelen);
extern void __raw_readsw(unsigned int addr, void *data, int wordlen);
extern void __raw_readsl(unsigned int addr, void *data, int longlen);
static inline void writeb(unsigned char b, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *)addr = b;
}
static inline void writew(unsigned short b, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *)addr = b;
}
static inline void writel(unsigned int b, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *)addr = b;
}
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel

static inline unsigned char readb(const volatile void __iomem *addr)
{
	return *(const volatile unsigned char __force *)addr;
}
static inline unsigned short readw(const volatile void __iomem *addr)
{
	return *(const volatile unsigned short __force *)addr;
}
static inline unsigned int readl(const volatile void __iomem *addr)
{
	return *(const volatile unsigned int __force *)addr;
}
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
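/*
 * Illustrative sketch (not part of the original header): basic MMIO
 * register access with the accessors above.  The register offsets and
 * the __iomem base pointer are hypothetical; on AVR32 these accessors
 * are plain native-endian loads/stores to the mapped address.
 */
#if 0
#define EXAMPLE_REG_CTRL	0x00	/* hypothetical control register */
#define EXAMPLE_REG_STATUS	0x04	/* hypothetical status register */

static unsigned int example_mmio_access(void __iomem *base)
{
	/* Write a 32-bit value to the control register... */
	writel(0x1, base + EXAMPLE_REG_CTRL);

	/* ...and read back the status register. */
	return readl(base + EXAMPLE_REG_STATUS);
}
#endif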
#define writesb(p, d, l)	__raw_writesb((unsigned int)p, d, l)
#define writesw(p, d, l)	__raw_writesw((unsigned int)p, d, l)
#define writesl(p, d, l)	__raw_writesl((unsigned int)p, d, l)

#define readsb(p, d, l)		__raw_readsb((unsigned int)p, d, l)
#define readsw(p, d, l)		__raw_readsw((unsigned int)p, d, l)
#define readsl(p, d, l)		__raw_readsl((unsigned int)p, d, l)
/*
 * These two are only here because ALSA _thinks_ it needs them...
 */
static inline void memcpy_fromio(void * to, const volatile void __iomem *from,
				 unsigned long count)
{
	char *p = to;
	while (count) {
		count--;
		*p = readb(from);
		p++;
		from++;
	}
}

static inline void memcpy_toio(volatile void __iomem *to, const void * from,
			       unsigned long count)
{
	const char *p = from;
	while (count) {
		count--;
		writeb(*p, to);
		p++;
		to++;
	}
}

static inline void memset_io(volatile void __iomem *addr, unsigned char val,
			     unsigned long count)
{
	memset((void __force *)addr, val, count);
}
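/*
 * Illustrative sketch (not part of the original header): copying a block
 * of device memory into a kernel buffer with memcpy_fromio(), and
 * clearing part of a mapped region with memset_io().  The base pointer,
 * buffer and sizes are hypothetical.
 */
#if 0
static void example_copy_from_device(void __iomem *base, void *buf,
				     unsigned long len)
{
	/* Byte-wise copy from bus space into normal memory. */
	memcpy_fromio(buf, base, len);

	/* Zero the first 16 bytes of the mapped region. */
	memset_io(base, 0, 16);
}
#endif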
/*
 * Bad read/write accesses...
 */
extern void __readwrite_bug(const char *fn);

#define IO_SPACE_LIMIT	0xffffffff

/* Convert I/O port address to virtual address */
#define __io(p)	((void __iomem *)phys_to_uncached(p))
/*
 * IO port access primitives
 * -------------------------
 *
 * The AVR32 doesn't have special IO access instructions; all IO is memory
 * mapped.  Note that these are defined to perform little endian accesses
 * only.  Their primary purpose is to access PCI and ISA peripherals.
 *
 * Note that for a big endian machine, this implies that the following
 * big endian mode connectivity is in place.
 *
 * The machine specific io.h include defines __io to translate an "IO"
 * address to a memory address.
 *
 * Note that we prevent GCC re-ordering or caching values in expressions
 * by introducing sequence points into the in*() definitions.  Note that
 * __raw_* do not guarantee this behaviour.
 *
 * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
 */
#define outb(v, p)		__raw_writeb(v, __io(p))
#define outw(v, p)		__raw_writew(cpu_to_le16(v), __io(p))
#define outl(v, p)		__raw_writel(cpu_to_le32(v), __io(p))

#define inb(p)			__raw_readb(__io(p))
#define inw(p)			le16_to_cpu(__raw_readw(__io(p)))
#define inl(p)			le32_to_cpu(__raw_readl(__io(p)))
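/*
 * Illustrative sketch (not part of the original header): x86-style port
 * I/O through the memory-mapped emulation above.  The port number is
 * hypothetical; __io() turns it into an uncached (P2) memory address.
 */
#if 0
#define EXAMPLE_ISA_PORT	0x3f8	/* hypothetical ISA-style port */

static unsigned char example_port_io(void)
{
	/* Write one byte to the port... */
	outb(0xff, EXAMPLE_ISA_PORT);

	/* ...then read a byte back from the same port. */
	return inb(EXAMPLE_ISA_PORT);
}
#endif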
static inline void __outsb(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		outb(*(u8 *)addr, port);
		addr++;
	}
}

static inline void __insb(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		*(u8 *)addr = inb(port);
		addr++;
	}
}

static inline void __outsw(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		outw(*(u16 *)addr, port);
		addr += 2;
	}
}

static inline void __insw(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		*(u16 *)addr = inw(port);
		addr += 2;
	}
}

static inline void __outsl(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		outl(*(u32 *)addr, port);
		addr += 4;
	}
}

static inline void __insl(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		*(u32 *)addr = inl(port);
		addr += 4;
	}
}
#define outsb(port, addr, count)	__outsb(port, addr, count)
#define insb(port, addr, count)		__insb(port, addr, count)
#define outsw(port, addr, count)	__outsw(port, addr, count)
#define insw(port, addr, count)		__insw(port, addr, count)
#define outsl(port, addr, count)	__outsl(port, addr, count)
#define insl(port, addr, count)		__insl(port, addr, count)
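/*
 * Illustrative sketch (not part of the original header): block transfers
 * through a 16-bit data port with insw()/outsw().  The port number,
 * buffer and word count are hypothetical.
 */
#if 0
static void example_string_port_io(void *buf, unsigned int words)
{
	/* Read 'words' 16-bit values from port 0x300 into buf. */
	insw(0x300, buf, words);

	/* Write them back out through the same port. */
	outsw(0x300, buf, words);
}
#endif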
extern void __iomem *__ioremap(unsigned long offset, size_t size,
			       unsigned long flags);
extern void __iounmap(void __iomem *addr);

/*
 * ioremap	-	map bus memory into CPU space
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to make
 * bus memory CPU accessible via the readb/.../writel functions and
 * the other mmio helpers.  The returned address is not guaranteed to
 * be usable directly as a virtual address.
 */
#define ioremap(offset, size)			\
	__ioremap((offset), (size), 0)

#define iounmap(addr)				\
	__iounmap(addr)
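/*
 * Illustrative sketch (not part of the original header): typical
 * ioremap()/iounmap() usage.  The physical base address and size are
 * hypothetical, and error handling is reduced to a NULL check.
 */
#if 0
static int example_ioremap_usage(void)
{
	void __iomem *regs;

	/* Map 4 KiB of hypothetical device registers at 0xffe00000. */
	regs = ioremap(0xffe00000, 0x1000);
	if (!regs)
		return -ENOMEM;

	/* Access the device through the mmio helpers, never directly. */
	writel(0x1, regs);

	/* Tear the mapping down again when done. */
	iounmap(regs);
	return 0;
}
#endif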
#define cached(addr) P1SEGADDR(addr)
#define uncached(addr) P2SEGADDR(addr)

#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#define page_to_bus page_to_phys
#define bus_to_page phys_to_page

#define dma_cache_wback_inv(_start, _size)	\
	flush_dcache_region(_start, _size)
#define dma_cache_inv(_start, _size)		\
	invalidate_dcache_region(_start, _size)
#define dma_cache_wback(_start, _size)		\
	clean_dcache_region(_start, _size)
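/*
 * Illustrative sketch (not part of the original header): how the DMA
 * cache maintenance macros above are typically paired with a transfer.
 * The buffer, size and transfer direction are hypothetical.
 */
#if 0
static void example_dma_cache_maintenance(void *buf, size_t size)
{
	/* Before the device reads from buf: write dirty lines back. */
	dma_cache_wback(buf, size);

	/* Before the CPU reads data the device wrote into buf:
	 * invalidate any stale cache lines covering it. */
	dma_cache_inv(buf, size);
}
#endif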
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */

#endif /* __ASM_AVR32_IO_H */