include/asm-powerpc/eeh.h
/*
 * eeh.h
 * Copyright (C) 2001  Dave Engebretsen & Todd Inglett IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef _PPC64_EEH_H
#define _PPC64_EEH_H
#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/string.h>

struct pci_dev;
struct pci_bus;
struct device_node;
#ifdef CONFIG_EEH

extern int eeh_subsystem_enabled;

/* Values for eeh_mode bits in device_node */
#define EEH_MODE_SUPPORTED	(1<<0)
#define EEH_MODE_NOCHECK	(1<<1)
#define EEH_MODE_ISOLATED	(1<<2)
#define EEH_MODE_RECOVERING	(1<<3)
#define EEH_MODE_IRQ_DISABLED	(1<<4)

/* Max number of EEH freezes allowed before we consider the device
 * to be permanently disabled. */
#define EEH_MAX_ALLOWED_FREEZES 5
void __init eeh_init(void);
unsigned long eeh_check_failure(const volatile void __iomem *token,
				unsigned long val);
int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev);
void __init pci_addr_cache_build(void);
/**
 * eeh_add_device_early
 * eeh_add_device_late
 *
 * Perform eeh initialization for devices added after boot.
 * Call eeh_add_device_early before doing any i/o to the
 * device (including config space i/o).  Call eeh_add_device_late
 * to finish the eeh setup for this device.  An illustrative call
 * sequence follows the declarations below.
 */
void eeh_add_device_early(struct device_node *);
void eeh_add_device_late(struct pci_dev *dev);
void eeh_add_device_tree_early(struct device_node *);
void eeh_add_device_tree_late(struct pci_bus *);
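
/*
 * Illustrative sketch only (not part of the original header): a hotplug
 * add path might use the calls above roughly as follows; "dn" and "dev"
 * are hypothetical locals owned by the hotplug code.
 *
 *	eeh_add_device_early(dn);	   (before any config space i/o)
 *	... probe config space, set up the struct pci_dev ...
 *	eeh_add_device_late(dev);	   (finish EEH setup for the device)
 */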
/**
 * eeh_remove_device - undo EEH setup for the indicated pci device
 * @dev: pci device to be removed
 *
 * This routine should be called when a device is removed from
 * a running system (e.g. by hotplug or dlpar).  It unregisters
 * the PCI device from the EEH subsystem.  I/O errors affecting
 * this device will no longer be detected after this call; thus,
 * i/o errors affecting this slot may leave this device unusable.
 */
void eeh_remove_device(struct pci_dev *);
/**
 * eeh_remove_bus_device - undo EEH setup for the device and its children
 * @dev: pci device to be removed
 *
 * As above, this removes the device; it also removes any child
 * pci devices.
 */
void eeh_remove_bus_device(struct pci_dev *);
/**
 * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
 *
 * If this macro yields TRUE, the caller relays to eeh_check_failure()
 * which does further tests out of line.
 */
#define EEH_POSSIBLE_ERROR(val, type)	((val) == (type)~0 && eeh_subsystem_enabled)
/*
 * Reads from a device which has been isolated by EEH will return
 * all 1s.  This macro gives an all-1s value of the given size (in
 * bytes: 1, 2, or 4) for comparing with the result of a read.
 */
#define EEH_IO_ERROR_VALUE(size)	(~0U >> ((4 - (size)) * 8))
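
/*
 * Worked values (a quick check of the shift above, not in the original
 * header): EEH_IO_ERROR_VALUE(1) == 0xff, EEH_IO_ERROR_VALUE(2) == 0xffff,
 * EEH_IO_ERROR_VALUE(4) == 0xffffffff -- i.e. all 1s for the access width.
 */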
#else /* !CONFIG_EEH */

static inline void eeh_init(void) { }

static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
{
	return val;
}

static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
{
	return 0;
}

static inline void pci_addr_cache_build(void) { }

static inline void eeh_add_device_early(struct device_node *dn) { }

static inline void eeh_add_device_late(struct pci_dev *dev) { }

static inline void eeh_remove_device(struct pci_dev *dev) { }

static inline void eeh_add_device_tree_early(struct device_node *dn) { }

static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }

static inline void eeh_remove_bus_device(struct pci_dev *dev) { }

#define EEH_POSSIBLE_ERROR(val, type)	(0)
#define EEH_IO_ERROR_VALUE(size)	(-1UL)
#endif /* CONFIG_EEH */

/*
 * MMIO read/write operations with EEH support.
 */
static inline u8 eeh_readb(const volatile void __iomem *addr)
{
	u8 val = in_8(addr);
	if (EEH_POSSIBLE_ERROR(val, u8))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_writeb(u8 val, volatile void __iomem *addr)
{
	out_8(addr, val);
}

static inline u16 eeh_readw(const volatile void __iomem *addr)
{
	u16 val = in_le16(addr);
	if (EEH_POSSIBLE_ERROR(val, u16))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_writew(u16 val, volatile void __iomem *addr)
{
	out_le16(addr, val);
}
static inline u16 eeh_raw_readw(const volatile void __iomem *addr)
{
	u16 val = in_be16(addr);
	if (EEH_POSSIBLE_ERROR(val, u16))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_raw_writew(u16 val, volatile void __iomem *addr)
{
	volatile u16 __iomem *vaddr = (volatile u16 __iomem *) addr;
	out_be16(vaddr, val);
}

static inline u32 eeh_readl(const volatile void __iomem *addr)
{
	u32 val = in_le32(addr);
	if (EEH_POSSIBLE_ERROR(val, u32))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_writel(u32 val, volatile void __iomem *addr)
{
	out_le32(addr, val);
}
static inline u32 eeh_raw_readl(const volatile void __iomem *addr)
{
	u32 val = in_be32(addr);
	if (EEH_POSSIBLE_ERROR(val, u32))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_raw_writel(u32 val, volatile void __iomem *addr)
{
	out_be32(addr, val);
}

static inline u64 eeh_readq(const volatile void __iomem *addr)
{
	u64 val = in_le64(addr);
	if (EEH_POSSIBLE_ERROR(val, u64))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_writeq(u64 val, volatile void __iomem *addr)
{
	out_le64(addr, val);
}
static inline u64 eeh_raw_readq(const volatile void __iomem *addr)
{
	u64 val = in_be64(addr);
	if (EEH_POSSIBLE_ERROR(val, u64))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_raw_writeq(u64 val, volatile void __iomem *addr)
{
	out_be64(addr, val);
}
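
/*
 * Illustrative sketch only: a driver read path built on the helpers above.
 * Everything here ("regs", REG_STATUS, the return value) is hypothetical;
 * the point is that an isolated slot returns all 1s, which eeh_readl()
 * funnels through eeh_check_failure() before the caller ever sees it.
 *
 *	u32 status = eeh_readl(regs + REG_STATUS);
 *	if (status == EEH_IO_ERROR_VALUE(4))
 *		return -EIO;	   (slot may be isolated; recovery follows)
 */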

#define EEH_CHECK_ALIGN(v,a) \
	((((unsigned long)(v)) & ((a) - 1)) == 0)
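
/*
 * Example (not in the original header): EEH_CHECK_ALIGN(0x1000, 4) is 1
 * and EEH_CHECK_ALIGN(0x1003, 4) is 0, so the set/copy loops below peel
 * off leading bytes until the pointer is 4-byte aligned.
 */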
static inline void eeh_memset_io(volatile void __iomem *addr, int c,
				 unsigned long n)
{
	void *p = (void __force *)addr;
	u32 lc = c;
	lc |= lc << 8;
	lc |= lc << 16;

	while(n && !EEH_CHECK_ALIGN(p, 4)) {
		*((volatile u8 *)p) = c;
		p++;
		n--;
	}
	while(n >= 4) {
		*((volatile u32 *)p) = lc;
		p += 4;
		n -= 4;
	}
	while(n) {
		*((volatile u8 *)p) = c;
		p++;
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");
}
static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *src,
				     unsigned long n)
{
	void *vsrc = (void __force *) src;
	void *destsave = dest;
	unsigned long nsave = n;

	while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
		*((u8 *)dest) = *((volatile u8 *)vsrc);
		__asm__ __volatile__ ("eieio" : : : "memory");
		vsrc++;
		dest++;
		n--;
	}
	while(n > 4) {
		*((u32 *)dest) = *((volatile u32 *)vsrc);
		__asm__ __volatile__ ("eieio" : : : "memory");
		vsrc += 4;
		dest += 4;
		n -= 4;
	}
	while(n) {
		*((u8 *)dest) = *((volatile u8 *)vsrc);
		__asm__ __volatile__ ("eieio" : : : "memory");
		vsrc++;
		dest++;
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");

	/* Look for ffff's here at dest[n].  Assume that at least 4 bytes
	 * were copied.  Check all four bytes.
	 */
	if ((nsave >= 4) &&
	    (EEH_POSSIBLE_ERROR((*((u32 *) destsave+nsave-4)), u32))) {
		eeh_check_failure(src, (*((u32 *) destsave+nsave-4)));
	}
}
static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src,
				   unsigned long n)
{
	void *vdest = (void __force *) dest;

	while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
		*((volatile u8 *)vdest) = *((u8 *)src);
		src++;
		vdest++;
		n--;
	}
	while(n > 4) {
		*((volatile u32 *)vdest) = *((volatile u32 *)src);
		src += 4;
		vdest += 4;
		n -= 4;
	}
	while(n) {
		*((volatile u8 *)vdest) = *((u8 *)src);
		src++;
		vdest++;
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");
}

#undef EEH_CHECK_ALIGN

static inline u8 eeh_inb(unsigned long port)
{
	u8 val;
	if (!_IO_IS_VALID(port))
		return ~0;
	val = in_8((u8 __iomem *)(port+pci_io_base));
	if (EEH_POSSIBLE_ERROR(val, u8))
		return eeh_check_failure((void __iomem *)(port), val);
	return val;
}

static inline void eeh_outb(u8 val, unsigned long port)
{
	if (_IO_IS_VALID(port))
		out_8((u8 __iomem *)(port+pci_io_base), val);
}

static inline u16 eeh_inw(unsigned long port)
{
	u16 val;
	if (!_IO_IS_VALID(port))
		return ~0;
	val = in_le16((u16 __iomem *)(port+pci_io_base));
	if (EEH_POSSIBLE_ERROR(val, u16))
		return eeh_check_failure((void __iomem *)(port), val);
	return val;
}

static inline void eeh_outw(u16 val, unsigned long port)
{
	if (_IO_IS_VALID(port))
		out_le16((u16 __iomem *)(port+pci_io_base), val);
}

static inline u32 eeh_inl(unsigned long port)
{
	u32 val;
	if (!_IO_IS_VALID(port))
		return ~0;
	val = in_le32((u32 __iomem *)(port+pci_io_base));
	if (EEH_POSSIBLE_ERROR(val, u32))
		return eeh_check_failure((void __iomem *)(port), val);
	return val;
}

static inline void eeh_outl(u32 val, unsigned long port)
{
	if (_IO_IS_VALID(port))
		out_le32((u32 __iomem *)(port+pci_io_base), val);
}

/* in-string eeh macros */
static inline void eeh_insb(unsigned long port, void * buf, int ns)
{
	_insb((u8 __iomem *)(port+pci_io_base), buf, ns);
	if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8))
		eeh_check_failure((void __iomem *)(port), *(u8*)buf);
}

static inline void eeh_insw_ns(unsigned long port, void * buf, int ns)
{
	_insw_ns((u16 __iomem *)(port+pci_io_base), buf, ns);
	if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16))
		eeh_check_failure((void __iomem *)(port), *(u16*)buf);
}

static inline void eeh_insl_ns(unsigned long port, void * buf, int nl)
{
	_insl_ns((u32 __iomem *)(port+pci_io_base), buf, nl);
	if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32))
		eeh_check_failure((void __iomem *)(port), *(u32*)buf);
}
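
/*
 * Usage note (hedged, based on the surrounding tree rather than this file):
 * drivers normally do not call the eeh_* accessors directly; on ppc64 the
 * generic readb()/inb()-style wrappers in the io.h headers are expected to
 * resolve to them when CONFIG_EEH is enabled, e.g. (illustrative only):
 *
 *	u8 v = readb(ioaddr);	   (ends up in eeh_readb() above)
 */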

#endif /* __KERNEL__ */
#endif /* _PPC64_EEH_H */