/*
 * include/asm-xtensa/io.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */
#ifndef _XTENSA_IO_H
#define _XTENSA_IO_H

#ifdef __KERNEL__
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/kernel.h>

#include <linux/types.h>

#define XCHAL_KIO_CACHED_VADDR	0xf0000000
#define XCHAL_KIO_BYPASS_VADDR	0xf8000000
#define XCHAL_KIO_PADDR		0xf0000000
#define XCHAL_KIO_SIZE		0x08000000
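/*
 * The KIO segment covers 128 MB (XCHAL_KIO_SIZE) of physical space
 * starting at XCHAL_KIO_PADDR. It is visible through two virtual
 * windows: a cached one at XCHAL_KIO_CACHED_VADDR and a cache-bypass
 * (uncached) one at XCHAL_KIO_BYPASS_VADDR.
 */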
/*
 * swap functions to change byte order from little-endian to big-endian and
 * vice versa.
 */

static inline unsigned short _swapw (unsigned short v)
{
	return (v << 8) | (v >> 8);
}

static inline unsigned int _swapl (unsigned int v)
{
	return (v << 24) | ((v & 0xff00) << 8) | ((v >> 8) & 0xff00) | (v >> 24);
}
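/*
 * Illustrative values: _swapw(0x1234) returns 0x3412 and
 * _swapl(0x12345678) returns 0x78563412.
 */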
/*
 * Change virtual addresses to physical addresses and vv.
 * These are trivial on the 1:1 Linux/Xtensa mapping
 */

static inline unsigned long virt_to_phys(volatile void * address)
{
	return __pa(address);
}

static inline void * phys_to_virt(unsigned long address)
{
	return __va(address);
}
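/*
 * Sketch of the 1:1 mapping (assuming the usual PAGE_OFFSET-based
 * linear map): virt_to_phys() of the address PAGE_OFFSET + x yields x,
 * and phys_to_virt(x) yields PAGE_OFFSET + x.
 */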
/*
 * virt_to_bus and bus_to_virt are deprecated.
 */

#define virt_to_bus(x)	virt_to_phys(x)
#define bus_to_virt(x)	phys_to_virt(x)
/*
 * Return the virtual (cached) address for the specified bus memory.
 * Note that we currently don't support any address outside the KIO segment.
 */

static inline void *ioremap(unsigned long offset, unsigned long size)
{
	if (offset >= XCHAL_KIO_PADDR
	    && offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
		return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
	else
		BUG();
}

static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
{
	if (offset >= XCHAL_KIO_PADDR
	    && offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
		return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
	else
		BUG();
}
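/*
 * Note that, with the constants above, ioremap() returns an address in
 * the bypass (uncached) window while ioremap_nocache() returns one in
 * the cached window; e.g. physical 0xf0001000 maps to 0xf8001000 via
 * ioremap() and to 0xf0001000 via ioremap_nocache().
 */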
static inline void iounmap(void *addr)
{
}
/*
 * Generic I/O
 */

#define readb(addr) \
	({ unsigned char __v = (*(volatile unsigned char *)(addr)); __v; })
#define readw(addr) \
	({ unsigned short __v = (*(volatile unsigned short *)(addr)); __v; })
#define readl(addr) \
	({ unsigned int __v = (*(volatile unsigned int *)(addr)); __v; })
#define writeb(b, addr) (void)((*(volatile unsigned char *)(addr)) = (b))
#define writew(b, addr) (void)((*(volatile unsigned short *)(addr)) = (b))
#define writel(b, addr) (void)((*(volatile unsigned int *)(addr)) = (b))
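/*
 * Typical use, purely as an illustration (the register offsets are
 * made up): map a device window and access 32-bit registers through it.
 *
 *	void *regs = ioremap(XCHAL_KIO_PADDR, 0x100);
 *	unsigned int id = readl(regs);
 *	writel(0x1, (char *)regs + 0x4);
 *	iounmap(regs);
 */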
static inline __u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(__force volatile __u8 *)(addr);
}

static inline __u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(__force volatile __u16 *)(addr);
}

static inline __u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(__force volatile __u32 *)(addr);
}

static inline void __raw_writeb(__u8 b, volatile void __iomem *addr)
{
	*(__force volatile __u8 *)(addr) = b;
}

static inline void __raw_writew(__u16 b, volatile void __iomem *addr)
{
	*(__force volatile __u16 *)(addr) = b;
}

static inline void __raw_writel(__u32 b, volatile void __iomem *addr)
{
	*(__force volatile __u32 *)(addr) = b;
}
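/*
 * Like the readb()/writeb() family above, these __raw_ accessors do no
 * byte swapping and add no ordering barriers; they differ only in
 * taking __iomem-annotated pointers, which keeps sparse happy.
 */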
/* These are the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl, the "string" versions
 * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
 * inb_p/inw_p/...
 * The macros don't do byte-swapping.
 */
#define inb(port)		readb((u8 *)((port)))
#define outb(val, port)		writeb((val),(u8 *)((unsigned long)(port)))
#define inw(port)		readw((u16 *)((port)))
#define outw(val, port)		writew((val),(u16 *)((unsigned long)(port)))
#define inl(port)		readl((u32 *)((port)))
#define outl(val, port)		writel((val),(u32 *)((unsigned long)(port)))

#define inb_p(port)		inb((port))
#define outb_p(val, port)	outb((val), (port))
#define inw_p(port)		inw((port))
#define outw_p(val, port)	outw((val), (port))
#define inl_p(port)		inl((port))
#define outl_p(val, port)	outl((val), (port))
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);
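/*
 * There is no separate I/O port space here, so the "port" argument of
 * the macros above is simply treated as a memory address; e.g.
 * inb(0xf8000000) reads one byte from that address (the value is
 * chosen only for illustration).
 */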
#define IO_SPACE_LIMIT ~0

#define memset_io(a,b,c)	memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c)	memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c)	memcpy((void *)(a),(b),(c))
/* At this point the Xtensa doesn't provide byte swap instructions */
#ifdef __XTENSA_EB__
# define in_8(addr) (*(u8*)(addr))
# define in_le16(addr) _swapw(*(u16*)(addr))
# define in_le32(addr) _swapl(*(u32*)(addr))
# define out_8(b, addr) *(u8*)(addr) = (b)
# define out_le16(b, addr) *(u16*)(addr) = _swapw(b)
# define out_le32(b, addr) *(u32*)(addr) = _swapl(b)
#elif defined(__XTENSA_EL__)
# define in_8(addr) (*(u8*)(addr))
# define in_le16(addr) (*(u16*)(addr))
# define in_le32(addr) (*(u32*)(addr))
# define out_8(b, addr) *(u8*)(addr) = (b)
# define out_le16(b, addr) *(u16*)(addr) = (b)
# define out_le32(b, addr) *(u32*)(addr) = (b)
#else
# error processor byte order undefined!
#endif
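/*
 * Example: on a big-endian core (__XTENSA_EB__), in_le16() swaps the
 * bytes it reads, so a device register holding the little-endian value
 * 0x1234 is returned as 0x1234 rather than 0x3412.
 */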
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p
#endif	/* __KERNEL__ */

#endif	/* _XTENSA_IO_H */