/*
 * eeh.h
 * Copyright (C) 2001  Dave Engebretsen & Todd Inglett IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#ifndef _PPC64_EEH_H
#define _PPC64_EEH_H
#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/list.h>
#include <linux/string.h>

struct pci_dev;
struct pci_bus;
struct device_node;

#ifdef CONFIG_EEH

extern int eeh_subsystem_enabled;

/* Values for eeh_mode bits in device_node */
#define EEH_MODE_SUPPORTED	(1<<0)
#define EEH_MODE_NOCHECK	(1<<1)
#define EEH_MODE_ISOLATED	(1<<2)
#define EEH_MODE_RECOVERING	(1<<3)
#define EEH_MODE_IRQ_DISABLED	(1<<4)

/* Max number of EEH freezes allowed before we consider the device
 * to be permanently disabled. */
#define EEH_MAX_ALLOWED_FREEZES 5
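
/*
 * Core EEH entry points: eeh_init() probes the device tree at boot and
 * enables EEH where the firmware supports it; eeh_check_failure() is called
 * when an MMIO read returns all 1s and decides whether the owning device has
 * really been isolated; eeh_dn_check_failure() performs the same check for
 * an explicit device_node/pci_dev pair; pci_addr_cache_build() builds the
 * address-to-device cache used to map an ioremapped token back to its pci_dev.
 */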
void __init eeh_init(void);
unsigned long eeh_check_failure(const volatile void __iomem *token,
				unsigned long val);
int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev);
void __init pci_addr_cache_build(void);

/**
 * eeh_add_device_tree_early
 * eeh_add_device_tree_late
 *
 * Perform EEH initialization for devices added after boot.
 * Call eeh_add_device_tree_early() before doing any i/o to the
 * device (including config space i/o).  Call eeh_add_device_tree_late()
 * to finish the EEH setup for this device.
 */
void eeh_add_device_tree_early(struct device_node *);
void eeh_add_device_tree_late(struct pci_bus *);
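
/*
 * A rough sketch of the intended call order on a hotplug path (the
 * surrounding function name is illustrative only, not part of this header):
 *
 *	eeh_add_device_tree_early(dn);	// before any config space access
 *	bus = scan_and_setup(dn);	// hypothetical: probe and configure
 *	eeh_add_device_tree_late(bus);	// once the pci_bus/pci_dev exist
 */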

/**
 * eeh_remove_bus_device - undo EEH for a device and its children.
 * @dev: pci device to be removed
 *
 * As above, this removes the device; it also removes its child
 * pci devices as well.
 */
void eeh_remove_bus_device(struct pci_dev *);

/**
 * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
 *
 * If this macro yields TRUE, the caller relays to eeh_check_failure()
 * which does further tests out of line.
 */
#define EEH_POSSIBLE_ERROR(val, type)	((val) == (type)~0 && eeh_subsystem_enabled)

/*
 * Reads from a device which has been isolated by EEH will return
 * all 1s.  This macro gives an all-1s value of the given size (in
 * bytes: 1, 2, or 4) for comparing with the result of a read.
 */
#define EEH_IO_ERROR_VALUE(size)	(~0U >> ((4 - (size)) * 8))
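
/*
 * For example, EEH_IO_ERROR_VALUE(1) is 0xff, EEH_IO_ERROR_VALUE(2) is
 * 0xffff and EEH_IO_ERROR_VALUE(4) is 0xffffffff: ~0U shifted right by
 * (4 - size) * 8 bits leaves exactly 'size' bytes of 1s.
 */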

#else /* !CONFIG_EEH */
static inline void eeh_init(void) { }

static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
{
	return val;
}

static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
{
	return 0;
}

static inline void pci_addr_cache_build(void) { }

static inline void eeh_add_device_tree_early(struct device_node *dn) { }

static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }

static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
#define EEH_POSSIBLE_ERROR(val, type)	(0)
#define EEH_IO_ERROR_VALUE(size)	(-1UL)
#endif /* CONFIG_EEH */

/*
 * MMIO read/write operations with EEH support.
 */
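/*
 * Each read helper below follows the same pattern: do the access with the
 * usual in_*() accessor and, if the value looks like the all-1s pattern a
 * frozen slot returns, hand it to eeh_check_failure() for the out-of-line
 * check.  The write helpers are plain pass-throughs; a write to an isolated
 * device is simply dropped by the hardware, so there is nothing to test
 * inline.
 */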
static inline u8 eeh_readb(const volatile void __iomem *addr)
{
	u8 val = in_8(addr);
	if (EEH_POSSIBLE_ERROR(val, u8))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_writeb(u8 val, volatile void __iomem *addr)
{
	out_8(addr, val);
}

static inline u16 eeh_readw(const volatile void __iomem *addr)
{
	u16 val = in_le16(addr);
	if (EEH_POSSIBLE_ERROR(val, u16))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_writew(u16 val, volatile void __iomem *addr)
{
	out_le16(addr, val);
}
static inline u16 eeh_raw_readw(const volatile void __iomem *addr)
{
	u16 val = in_be16(addr);
	if (EEH_POSSIBLE_ERROR(val, u16))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_raw_writew(u16 val, volatile void __iomem *addr)
{
	volatile u16 __iomem *vaddr = (volatile u16 __iomem *) addr;
	out_be16(vaddr, val);
}

static inline u32 eeh_readl(const volatile void __iomem *addr)
{
	u32 val = in_le32(addr);
	if (EEH_POSSIBLE_ERROR(val, u32))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_writel(u32 val, volatile void __iomem *addr)
{
	out_le32(addr, val);
}
static inline u32 eeh_raw_readl(const volatile void __iomem *addr)
{
	u32 val = in_be32(addr);
	if (EEH_POSSIBLE_ERROR(val, u32))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_raw_writel(u32 val, volatile void __iomem *addr)
{
	out_be32(addr, val);
}

static inline u64 eeh_readq(const volatile void __iomem *addr)
{
	u64 val = in_le64(addr);
	if (EEH_POSSIBLE_ERROR(val, u64))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_writeq(u64 val, volatile void __iomem *addr)
{
	out_le64(addr, val);
}
static inline u64 eeh_raw_readq(const volatile void __iomem *addr)
{
	u64 val = in_be64(addr);
	if (EEH_POSSIBLE_ERROR(val, u64))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_raw_writeq(u64 val, volatile void __iomem *addr)
{
	out_be64(addr, val);
}
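
/*
 * EEH_CHECK_ALIGN(v, a) is true when v is aligned to a bytes (a must be a
 * power of two); e.g. EEH_CHECK_ALIGN(p, 4) tests that the low two bits of p
 * are clear.  It is only used by the copy helpers below and is #undef'd once
 * they have been defined.
 */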
#define EEH_CHECK_ALIGN(v,a) \
	((((unsigned long)(v)) & ((a) - 1)) == 0)

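/*
 * eeh_memset_io(), eeh_memcpy_fromio() and eeh_memcpy_toio() all share the
 * same shape: a sync barrier, a byte-wise head loop until the pointers are
 * 32-bit aligned, a word-wise body loop, a byte-wise tail loop, and a closing
 * sync.  Only eeh_memcpy_fromio() reads from the device, so it alone
 * re-checks the copied data for the all-1s pattern afterwards.
 */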
static inline void eeh_memset_io(volatile void __iomem *addr, int c,
				 unsigned long n)
{
	void *p = (void __force *)addr;
	u32 lc = c;
	lc |= lc << 8;
	lc |= lc << 16;

	__asm__ __volatile__ ("sync" : : : "memory");
	while(n && !EEH_CHECK_ALIGN(p, 4)) {
		*((volatile u8 *)p) = c;
		p++;
		n--;
	}
	while(n >= 4) {
		*((volatile u32 *)p) = lc;
		p += 4;
		n -= 4;
	}
	while(n) {
		*((volatile u8 *)p) = c;
		p++;
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");
}
static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *src,
				     unsigned long n)
{
	void *vsrc = (void __force *) src;
	void *destsave = dest;
	unsigned long nsave = n;

	__asm__ __volatile__ ("sync" : : : "memory");
	while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
		*((u8 *)dest) = *((volatile u8 *)vsrc);
		__asm__ __volatile__ ("eieio" : : : "memory");
		vsrc++;
		dest++;
		n--;
	}
	while(n > 4) {
		*((u32 *)dest) = *((volatile u32 *)vsrc);
		__asm__ __volatile__ ("eieio" : : : "memory");
		vsrc += 4;
		dest += 4;
		n -= 4;
	}
	while(n) {
		*((u8 *)dest) = *((volatile u8 *)vsrc);
		__asm__ __volatile__ ("eieio" : : : "memory");
		vsrc++;
		dest++;
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");

	/* Look for ffff's here at dest[n].  Assume that at least 4 bytes
	 * were copied. Check all four bytes.
	 */
	if ((nsave >= 4) &&
	    (EEH_POSSIBLE_ERROR((*((u32 *) destsave+nsave-4)), u32))) {
		eeh_check_failure(src, (*((u32 *) destsave+nsave-4)));
	}
}

static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src,
				   unsigned long n)
{
	void *vdest = (void __force *) dest;

	__asm__ __volatile__ ("sync" : : : "memory");
	while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
		*((volatile u8 *)vdest) = *((u8 *)src);
		src++;
		vdest++;
		n--;
	}
	while(n > 4) {
		*((volatile u32 *)vdest) = *((volatile u32 *)src);
		src += 4;
		vdest += 4;
		n -= 4;
	}
	while(n) {
		*((volatile u8 *)vdest) = *((u8 *)src);
		src++;
		vdest++;
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");
}

#undef EEH_CHECK_ALIGN

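/*
 * Port I/O wrappers.  On powerpc, ISA-style port numbers are offsets from
 * pci_io_base, so each helper adds pci_io_base before doing the MMIO access
 * and then applies the same all-1s check as the readb/readw/readl helpers
 * above.
 */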
static inline u8 eeh_inb(unsigned long port)
{
	u8 val;
	val = in_8((u8 __iomem *)(port+pci_io_base));
	if (EEH_POSSIBLE_ERROR(val, u8))
		return eeh_check_failure((void __iomem *)(port), val);
	return val;
}

static inline void eeh_outb(u8 val, unsigned long port)
{
	out_8((u8 __iomem *)(port+pci_io_base), val);
}

static inline u16 eeh_inw(unsigned long port)
{
	u16 val;
	val = in_le16((u16 __iomem *)(port+pci_io_base));
	if (EEH_POSSIBLE_ERROR(val, u16))
		return eeh_check_failure((void __iomem *)(port), val);
	return val;
}

static inline void eeh_outw(u16 val, unsigned long port)
{
	out_le16((u16 __iomem *)(port+pci_io_base), val);
}

static inline u32 eeh_inl(unsigned long port)
{
	u32 val;
	val = in_le32((u32 __iomem *)(port+pci_io_base));
	if (EEH_POSSIBLE_ERROR(val, u32))
		return eeh_check_failure((void __iomem *)(port), val);
	return val;
}

static inline void eeh_outl(u32 val, unsigned long port)
{
	out_le32((u32 __iomem *)(port+pci_io_base), val);
}

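/*
 * The string helpers below check only the last element transferred: once a
 * slot is frozen, every subsequent read returns all 1s, so if the final
 * value is not the all-1s pattern the whole transfer completed without an
 * EEH event.
 */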
/* in-string eeh macros */
static inline void eeh_insb(unsigned long port, void * buf, int ns)
{
	_insb((u8 __iomem *)(port+pci_io_base), buf, ns);
	if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8))
		eeh_check_failure((void __iomem *)(port), *(u8*)buf);
}

static inline void eeh_insw_ns(unsigned long port, void * buf, int ns)
{
	_insw_ns((u16 __iomem *)(port+pci_io_base), buf, ns);
	if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16))
		eeh_check_failure((void __iomem *)(port), *(u16*)buf);
}

static inline void eeh_insl_ns(unsigned long port, void * buf, int nl)
{
	_insl_ns((u32 __iomem *)(port+pci_io_base), buf, nl);
	if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32))
		eeh_check_failure((void __iomem *)(port), *(u32*)buf);
}

#endif /* __KERNEL__ */
#endif /* _PPC64_EEH_H */