/*
 * linux/include/asm-m68k/raw_io.h
 *
 * 10/20/00 RZ: - created from bits of io.h and ide.h to clean up the namespace
 *
 */
#ifndef _RAW_IO_H
#define _RAW_IO_H

#ifdef __KERNEL__

#include <asm/types.h>


/* Values for nocacheflag and cmode */
#define IOMAP_FULL_CACHING	0
#define IOMAP_NOCACHE_SER	1
#define IOMAP_NOCACHE_NONSER	2
#define IOMAP_WRITETHROUGH	3

extern void iounmap(void __iomem *addr);

extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
			       int cacheflag);
extern void __iounmap(void *addr, unsigned long size);
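
/*
 * Illustrative example (added note, not part of the original header):
 * a driver might map a device's registers uncached and serialized, and
 * later tear the mapping down, roughly like this.  FOO_PHYS_BASE and
 * FOO_SIZE are made-up names used only for illustration:
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap(FOO_PHYS_BASE, FOO_SIZE, IOMAP_NOCACHE_SER);
 *	...
 *	iounmap(regs);
 */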
/* ++roman: The assignments to temporary variables keep gcc from sometimes
 * generating two accesses to memory, which may be undesirable for some
 * devices.
 */
#define in_8(addr) \
    ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
#define in_be16(addr) \
    ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
#define in_be32(addr) \
    ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
#define in_le16(addr) \
    ({ u16 __v = le16_to_cpu(*(__force volatile u16 *) (addr)); __v; })
#define in_le32(addr) \
    ({ u32 __v = le32_to_cpu(*(__force volatile u32 *) (addr)); __v; })

#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
#define out_le16(addr,w) (void)((*(__force volatile u16 *) (addr)) = cpu_to_le16(w))
#define out_le32(addr,l) (void)((*(__force volatile u32 *) (addr)) = cpu_to_le32(l))
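
/*
 * Illustrative example (added note, not part of the original header),
 * assuming `regs' is a cookie returned by __ioremap() and STATUS and
 * IRQ_ENABLE are hypothetical register offset and bit definitions:
 *
 *	u16 status = in_be16(regs + STATUS);
 *	out_be16(regs + STATUS, status | IRQ_ENABLE);
 *
 * Each in_*()/out_*() accessor performs exactly one volatile access,
 * as explained in the comment above.
 */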
#define raw_inb in_8
#define raw_inw in_be16
#define raw_inl in_be32

#define raw_outb(val,port) out_8((port),(val))
#define raw_outw(val,port) out_be16((port),(val))
#define raw_outl(val,port) out_be32((port),(val))
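
/*
 * String I/O helpers (descriptive note added for clarity, not in the
 * original header): raw_insb()/raw_outsb() move bytes one at a time.
 * raw_insw()/raw_outsw()/raw_insl()/raw_outsl() first transfer the
 * (count % 16) remainder with a simple dbra loop, then move the rest in
 * 16-fold unrolled blocks, accessing the same port address while
 * post-incrementing the buffer pointer.
 */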
static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		*buf++ = in_8(port);
}
static inline void raw_outsb(volatile u8 __iomem *port, const u8 *buf,
			     unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		out_8(port, *buf++);
}
static inline void raw_insw(volatile u16 __iomem *port, u16 *buf, unsigned int nr)
{
	unsigned int tmp;

	if (nr & 15) {
		tmp = (nr & 15) - 1;
		asm volatile (
			"1: movew %2@,%0@+; dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
	if (nr >> 4) {
		tmp = (nr >> 4) - 1;
		asm volatile (
			"1: "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
}
static inline void raw_outsw(volatile u16 __iomem *port, const u16 *buf,
			     unsigned int nr)
{
	unsigned int tmp;

	if (nr & 15) {
		tmp = (nr & 15) - 1;
		asm volatile (
			"1: movew %0@+,%2@; dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
	if (nr >> 4) {
		tmp = (nr >> 4) - 1;
		asm volatile (
			"1: "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
}
static inline void raw_insl(volatile u32 __iomem *port, u32 *buf, unsigned int nr)
{
	unsigned int tmp;

	if (nr & 15) {
		tmp = (nr & 15) - 1;
		asm volatile (
			"1: movel %2@,%0@+; dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
	if (nr >> 4) {
		tmp = (nr >> 4) - 1;
		asm volatile (
			"1: "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
}
static inline void raw_outsl(volatile u32 __iomem *port, const u32 *buf,
			     unsigned int nr)
{
	unsigned int tmp;

	if (nr & 15) {
		tmp = (nr & 15) - 1;
		asm volatile (
			"1: movel %0@+,%2@; dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
	if (nr >> 4) {
		tmp = (nr >> 4) - 1;
		asm volatile (
			"1: "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
}
static inline void raw_insw_swapw(volatile u16 __iomem *port, u16 *buf,
				  unsigned int nr)
{
	if ((nr) % 8)
		__asm__ __volatile__
		       ("\tmovel %0,%/a0\n\t"
			"movel %1,%/a1\n\t"
			"movel %2,%/d6\n\t"
			"subql #1,%/d6\n"
			"1:\tmovew %/a0@,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a1@+\n\t"
			"dbra %/d6,1b"
			:
			: "g" (port), "g" (buf), "g" (nr)
			: "d0", "a0", "a1", "d6");
	else
		__asm__ __volatile__
		       ("movel %0,%/a0\n\t"
			"movel %1,%/a1\n\t"
			"movel %2,%/d6\n\t"
			"lsrl #3,%/d6\n\t"
			"subql #1,%/d6\n"
			"1:\tmovew %/a0@,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a1@+\n\t"
			"movew %/a0@,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a1@+\n\t"
			"movew %/a0@,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a1@+\n\t"
			"movew %/a0@,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a1@+\n\t"
			"movew %/a0@,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a1@+\n\t"
			"movew %/a0@,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a1@+\n\t"
			"movew %/a0@,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a1@+\n\t"
			"movew %/a0@,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a1@+\n\t"
			"dbra %/d6,1b"
			:
			: "g" (port), "g" (buf), "g" (nr)
			: "d0", "a0", "a1", "d6");
}
static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
				   unsigned int nr)
{
	if ((nr) % 8)
		__asm__ __volatile__
		       ("movel %0,%/a0\n\t"
			"movel %1,%/a1\n\t"
			"movel %2,%/d6\n\t"
			"subql #1,%/d6\n"
			"1:\tmovew %/a1@+,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a0@\n\t"
			"dbra %/d6,1b"
			:
			: "g" (port), "g" (buf), "g" (nr)
			: "d0", "a0", "a1", "d6");
	else
		__asm__ __volatile__
		       ("movel %0,%/a0\n\t"
			"movel %1,%/a1\n\t"
			"movel %2,%/d6\n\t"
			"lsrl #3,%/d6\n\t"
			"subql #1,%/d6\n"
			"1:\tmovew %/a1@+,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a0@\n\t"
			"movew %/a1@+,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a0@\n\t"
			"movew %/a1@+,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a0@\n\t"
			"movew %/a1@+,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a0@\n\t"
			"movew %/a1@+,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a0@\n\t"
			"movew %/a1@+,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a0@\n\t"
			"movew %/a1@+,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a0@\n\t"
			"movew %/a1@+,%/d0\n\t"
			"rolw #8,%/d0\n\t"
			"movew %/d0,%/a0@\n\t"
			"dbra %/d6,1b"
			:
			: "g" (port), "g" (buf), "g" (nr)
			: "d0", "a0", "a1", "d6");
}
#define __raw_writel raw_outl

#endif /* __KERNEL__ */

#endif /* _RAW_IO_H */