arch/powerpc/platforms/iseries/pci.c
/*
 * Copyright (C) 2001 Allan Trautman, IBM Corporation
 *
 * iSeries specific routines for PCI.
 *
 * Based on code from pci.c and iSeries_pci.c 32bit
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ide.h>
#include <linux/pci.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>

#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/mf.h>
#include <asm/iseries/iommu.h>

#include <asm/ppc-pci.h>

#include "irq.h"
#include "pci.h"
#include "call_pci.h"
/*
 * Forward declares of prototypes.
 */
static struct device_node *find_Device_Node(int bus, int devfn);

static int Pci_Retry_Max = 3;           /* Only retry 3 times  */
static int Pci_Error_Flag = 1;          /* Set Retry Error on. */

static struct pci_ops iSeries_pci_ops;
/*
 * Table defines
 * Each Entry size is 4 MB * 1024 Entries = 4GB I/O address space.
 */
#define IOMM_TABLE_MAX_ENTRIES  1024
#define IOMM_TABLE_ENTRY_SIZE   0x0000000000400000UL
#define BASE_IO_MEMORY          0xE000000000000000UL

static unsigned long max_io_memory = BASE_IO_MEMORY;
static long current_iomm_table_entry;

/*
 * Lookup Tables.
 */
static struct device_node *iomm_table[IOMM_TABLE_MAX_ENTRIES];
static u8 iobar_table[IOMM_TABLE_MAX_ENTRIES];

static const char pci_io_text[] = "iSeries PCI I/O";
static DEFINE_SPINLOCK(iomm_table_lock);
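/*
 * Each 4 MB slot of the fixed window starting at BASE_IO_MEMORY is
 * described by the pair of entries at the same index: iomm_table[]
 * holds the owning device node and iobar_table[] the BAR backing that
 * slot.  xlate_iomm_address() below turns an I/O token back into
 * (device node, BAR, offset) using these tables.
 */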
/*
 * iomm_table_allocate_entry
 *
 * Adds pci_dev entry in address translation table
 *
 * - Allocates the number of entries required in table based on BAR
 *   size.
 * - Allocates starting at BASE_IO_MEMORY and increases.
 * - The size is rounded up to be a multiple of entry size.
 * - CurrentIndex is incremented to keep track of the last entry.
 * - Builds the resource entry for allocated BARs.
 */
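/*
 * For example, a 6 MB BAR consumes two 4 MB table entries, i.e. its
 * I/O window is rounded up to 8 MB.
 */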
static void iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
{
        struct resource *bar_res = &dev->resource[bar_num];
        long bar_size = pci_resource_len(dev, bar_num);

        /*
         * No space to allocate, quick exit, skip Allocation.
         */
        if (bar_size == 0)
                return;
        /*
         * Set Resource values.
         */
        spin_lock(&iomm_table_lock);
        bar_res->name = pci_io_text;
        bar_res->start = BASE_IO_MEMORY +
                IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
        bar_res->end = bar_res->start + bar_size - 1;
        /*
         * Allocate the number of table entries needed for BAR.
         */
        while (bar_size > 0) {
                iomm_table[current_iomm_table_entry] = dev->sysdata;
                iobar_table[current_iomm_table_entry] = bar_num;
                bar_size -= IOMM_TABLE_ENTRY_SIZE;
                ++current_iomm_table_entry;
        }
        max_io_memory = BASE_IO_MEMORY +
                IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
        spin_unlock(&iomm_table_lock);
}
/*
 * allocate_device_bars
 *
 * - Allocates ALL pci_dev BARs and updates the resources with the
 *   BAR value.  BARs with zero length are skipped.  The BAR size is
 *   taken from the pci_dev resource and iomm_table_allocate_entry()
 *   is called to allocate the table entries for each BAR.
 * - Loops through the BAR resources (0 - 5) including the ROM,
 *   which is resource 6.
 */
static void allocate_device_bars(struct pci_dev *dev)
{
        int bar_num;

        for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num)
                iomm_table_allocate_entry(dev, bar_num);
}
/*
 * Log error information to system console.
 * Filter out the device not there errors.
 * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx
 * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx
 * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx
 */
static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
                int AgentId, int HvRc)
{
        if (HvRc == 0x0302)
                return;
        printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X Rc: 0x%04X",
               Error_Text, Bus, SubBus, AgentId, HvRc);
}
/*
 * iSeries_pcibios_init
 *
 * Description:
 * This function checks for all possible system PCI host bridges that connect
 * PCI buses.  The system hypervisor is queried as to the guest partition
 * ownership status.  A pci_controller is built for any bus which is partially
 * owned or fully owned by this guest partition.
 */
void iSeries_pcibios_init(void)
{
        struct pci_controller *phb;
        struct device_node *root = of_find_node_by_path("/");
        struct device_node *node = NULL;

        if (root == NULL) {
                printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
                                "of device tree\n");
                return;
        }
        while ((node = of_get_next_child(root, node)) != NULL) {
                HvBusNumber bus;
                const u32 *busp;

                if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
                        continue;

                busp = get_property(node, "bus-range", NULL);
                if (busp == NULL)
                        continue;
                bus = *busp;
                printk("bus %d appears to exist\n", bus);
                phb = pcibios_alloc_controller(node);
                if (phb == NULL)
                        continue;

                phb->pci_mem_offset = phb->local_number = bus;
                phb->first_busno = bus;
                phb->last_busno = bus;
                phb->ops = &iSeries_pci_ops;
        }

        of_node_put(root);

        pci_devs_phb_init();
}
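/*
 * Every bus found above uses iSeries_pci_ops for config space, so all
 * config reads and writes on these buses go through the hypervisor
 * calls defined later in this file.
 */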
/*
 * iSeries_pci_final_fixup(void)
 */
void __init iSeries_pci_final_fixup(void)
{
        struct pci_dev *pdev = NULL;
        struct device_node *node;
        int DeviceCount = 0;

        /* Fix up the device node and pci_dev relationship */
        mf_display_src(0xC9000100);

        printk("pcibios_final_fixup\n");
        for_each_pci_dev(pdev) {
                node = find_Device_Node(pdev->bus->number, pdev->devfn);
                printk("pci dev %p (%x.%x), node %p\n", pdev,
                       pdev->bus->number, pdev->devfn, node);

                if (node != NULL) {
                        struct pci_dn *pdn = PCI_DN(node);
                        const u32 *agent;

                        agent = get_property(node, "linux,agent-id", NULL);
                        if ((pdn != NULL) && (agent != NULL)) {
                                u8 irq = iSeries_allocate_IRQ(pdn->busno, 0,
                                                pdn->bussubno);
                                int err;

                                err = HvCallXm_connectBusUnit(pdn->busno,
                                                pdn->bussubno, *agent, irq);
                                if (err)
                                        pci_Log_Error("Connect Bus Unit",
                                                pdn->busno, pdn->bussubno,
                                                *agent, err);
                                else {
                                        err = HvCallPci_configStore8(pdn->busno,
                                                        pdn->bussubno, *agent,
                                                        PCI_INTERRUPT_LINE,
                                                        irq);
                                        if (err)
                                                pci_Log_Error("PciCfgStore Irq Failed!",
                                                        pdn->busno, pdn->bussubno,
                                                        *agent, err);
                                }
                                if (!err)
                                        pdev->irq = irq;
                        }

                        ++DeviceCount;
                        pdev->sysdata = (void *)node;
                        PCI_DN(node)->pcidev = pdev;
                        allocate_device_bars(pdev);
                        iSeries_Device_Information(pdev, DeviceCount);
                        iommu_devnode_init_iSeries(node);
                } else
                        printk("PCI: Device Tree not found for 0x%016lX\n",
                                        (unsigned long)pdev);
        }
        iSeries_activate_IRQs();
        mf_display_src(0xC9000200);
}
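/*
 * For each device the loop above: finds its device tree node, allocates
 * a virtual IRQ and connects the bus unit to it via
 * HvCallXm_connectBusUnit(), writes that IRQ into PCI_INTERRUPT_LINE,
 * links pdev->sysdata and PCI_DN(node)->pcidev to each other, carves
 * out the I/O window for its BARs and initialises its IOMMU table.
 */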
/*
 * Look down the chain to find the matching device node.
 */
static struct device_node *find_Device_Node(int bus, int devfn)
{
        struct device_node *node;

        for (node = NULL; (node = of_find_all_nodes(node)); ) {
                struct pci_dn *pdn = PCI_DN(node);

                if (pdn && (bus == pdn->busno) && (devfn == pdn->devfn))
                        return node;
        }
        return NULL;
}
#if 0
/*
 * Returns the device node for the passed pci_dev
 * Sanity Check Node PciDev to passed pci_dev
 * If none is found, returns a NULL which the client must handle.
 */
static struct device_node *get_Device_Node(struct pci_dev *pdev)
{
        struct device_node *node;

        node = pdev->sysdata;
        if (node == NULL || PCI_DN(node)->pcidev != pdev)
                node = find_Device_Node(pdev->bus->number, pdev->devfn);
        return node;
}
#endif
/*
 * Config space read and write functions.
 * For now at least, we look for the device node for the bus and devfn
 * that we are asked to access.  It may be possible to translate the devfn
 * to a subbus and deviceid more directly.
 */
static u64 hv_cfg_read_func[4] = {
        HvCallPciConfigLoad8, HvCallPciConfigLoad16,
        HvCallPciConfigLoad32, HvCallPciConfigLoad32
};

static u64 hv_cfg_write_func[4] = {
        HvCallPciConfigStore8, HvCallPciConfigStore16,
        HvCallPciConfigStore32, HvCallPciConfigStore32
};
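/*
 * The access size (1, 2 or 4 bytes) selects the hypervisor call via
 * (size - 1) & 3: 1 -> Load8/Store8, 2 -> Load16/Store16,
 * 4 -> Load32/Store32 (index 3 just duplicates the 32-bit entry).
 */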
/*
 * Read PCI config space
 */
static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
                int offset, int size, u32 *val)
{
        struct device_node *node = find_Device_Node(bus->number, devfn);
        u64 fn;
        struct HvCallPci_LoadReturn ret;

        if (node == NULL)
                return PCIBIOS_DEVICE_NOT_FOUND;
        if (offset > 255) {
                *val = ~0;
                return PCIBIOS_BAD_REGISTER_NUMBER;
        }

        fn = hv_cfg_read_func[(size - 1) & 3];
        HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0);

        if (ret.rc != 0) {
                *val = ~0;
                return PCIBIOS_DEVICE_NOT_FOUND;        /* or something */
        }

        *val = ret.value;
        return 0;
}
/*
 * Write PCI config space
 */
static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
                int offset, int size, u32 val)
{
        struct device_node *node = find_Device_Node(bus->number, devfn);
        u64 fn;
        u64 ret;

        if (node == NULL)
                return PCIBIOS_DEVICE_NOT_FOUND;
        if (offset > 255)
                return PCIBIOS_BAD_REGISTER_NUMBER;

        fn = hv_cfg_write_func[(size - 1) & 3];
        ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0);

        if (ret != 0)
                return PCIBIOS_DEVICE_NOT_FOUND;

        return 0;
}
static struct pci_ops iSeries_pci_ops = {
        .read = iSeries_pci_read_config,
        .write = iSeries_pci_write_config
};
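/*
 * These ops are installed on every pci_controller created in
 * iSeries_pcibios_init(), so generic PCI config accesses end up in the
 * two functions above.
 */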
/*
 * Check Return Code
 * -> On Failure, print and log information.
 *    Increment Retry Count, if exceeds max, panic partition.
 *
 * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
 * PCI: Device 23.90 ReadL Retry( 1)
 * PCI: Device 23.90 ReadL Retry Successful(1)
 */
static int CheckReturnCode(char *TextHdr, struct device_node *DevNode,
                int *retry, u64 ret)
{
        if (ret != 0) {
                struct pci_dn *pdn = PCI_DN(DevNode);

                (*retry)++;
                printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
                                TextHdr, pdn->busno, pdn->devfn,
                                *retry, (int)ret);
                /*
                 * Bump the retry and check for retry count exceeded.
                 * If exceeded, panic the system.
                 */
                if (((*retry) > Pci_Retry_Max) &&
                                (Pci_Error_Flag > 0)) {
                        mf_display_src(0xB6000103);
                        panic_timeout = 0;
                        panic("PCI: Hardware I/O Error, SRC B6000103, "
                                        "Automatic Reboot Disabled.\n");
                }
                return -1;      /* Tell the caller to retry */
        }
        return 0;
}
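/*
 * A non-zero return makes the MMIO wrappers below reissue the
 * hypervisor call; after Pci_Retry_Max consecutive failures (with
 * Pci_Error_Flag set) the partition is panicked with SRC B6000103.
 */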
/*
 * Translate the I/O Address into a device node, bar, and bar offset.
 * Note: Make sure the passed variables end up on the stack to avoid
 * the exposure of being device global.
 */
static inline struct device_node *xlate_iomm_address(
                const volatile void __iomem *IoAddress,
                u64 *dsaptr, u64 *BarOffsetPtr)
{
        unsigned long OrigIoAddr;
        unsigned long BaseIoAddr;
        unsigned long TableIndex;
        struct device_node *DevNode;

        OrigIoAddr = (unsigned long __force)IoAddress;
        if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory))
                return NULL;
        BaseIoAddr = OrigIoAddr - BASE_IO_MEMORY;
        TableIndex = BaseIoAddr / IOMM_TABLE_ENTRY_SIZE;
        DevNode = iomm_table[TableIndex];

        if (DevNode != NULL) {
                int barnum = iobar_table[TableIndex];
                *dsaptr = iseries_ds_addr(DevNode) | (barnum << 24);
                *BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE;
        } else
                panic("PCI: Invalid PCI IoAddress detected!\n");

        return DevNode;
}
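/*
 * The translation is:
 *   index  = (addr - BASE_IO_MEMORY) / IOMM_TABLE_ENTRY_SIZE
 *   offset = (addr - BASE_IO_MEMORY) % IOMM_TABLE_ENTRY_SIZE
 *   dsa    = iseries_ds_addr(iomm_table[index]) | (iobar_table[index] << 24)
 * i.e. the device's DSA with the BAR number shifted in at bit 24, plus
 * the offset of the access within that BAR's window.
 */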
/*
 * Read MM I/O Instructions for the iSeries
 * On MM I/O error, all ones are returned and iSeries_pci_IoError is called,
 * else, data is returned in big Endian format.
 *
 * iSeries_Read_Byte = Read Byte  ( 8 bit)
 * iSeries_Read_Word = Read Word (16 bit)
 * iSeries_Read_Long = Read Long (32 bit)
 */
static u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
{
        u64 BarOffset;
        u64 dsa;
        int retry = 0;
        struct HvCallPci_LoadReturn ret;
        struct device_node *DevNode =
                xlate_iomm_address(IoAddress, &dsa, &BarOffset);

        if (DevNode == NULL) {
                static unsigned long last_jiffies;
                static int num_printed;

                if ((jiffies - last_jiffies) > 60 * HZ) {
                        last_jiffies = jiffies;
                        num_printed = 0;
                }
                if (num_printed++ < 10)
                        printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n", IoAddress);
                return 0xff;
        }
        do {
                HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0);
        } while (CheckReturnCode("RDB", DevNode, &retry, ret.rc) != 0);

        return (u8)ret.value;
}
static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
{
        u64 BarOffset;
        u64 dsa;
        int retry = 0;
        struct HvCallPci_LoadReturn ret;
        struct device_node *DevNode =
                xlate_iomm_address(IoAddress, &dsa, &BarOffset);

        if (DevNode == NULL) {
                static unsigned long last_jiffies;
                static int num_printed;

                if ((jiffies - last_jiffies) > 60 * HZ) {
                        last_jiffies = jiffies;
                        num_printed = 0;
                }
                if (num_printed++ < 10)
                        printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n", IoAddress);
                return 0xffff;
        }
        do {
                HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa,
                                BarOffset, 0);
        } while (CheckReturnCode("RDW", DevNode, &retry, ret.rc) != 0);

        return swab16((u16)ret.value);
}
static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
{
        u64 BarOffset;
        u64 dsa;
        int retry = 0;
        struct HvCallPci_LoadReturn ret;
        struct device_node *DevNode =
                xlate_iomm_address(IoAddress, &dsa, &BarOffset);

        if (DevNode == NULL) {
                static unsigned long last_jiffies;
                static int num_printed;

                if ((jiffies - last_jiffies) > 60 * HZ) {
                        last_jiffies = jiffies;
                        num_printed = 0;
                }
                if (num_printed++ < 10)
                        printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n", IoAddress);
                return 0xffffffff;
        }
        do {
                HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa,
                                BarOffset, 0);
        } while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0);

        return swab32((u32)ret.value);
}
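/*
 * The hypervisor returns load data in big-endian order; the 16- and
 * 32-bit reads are byte-swapped here because these routines back the
 * little-endian in_le16()/in_le32() accessors further down.
 */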
/*
 * Write MM I/O Instructions for the iSeries
 *
 * iSeries_Write_Byte = Write Byte (8 bit)
 * iSeries_Write_Word = Write Word(16 bit)
 * iSeries_Write_Long = Write Long(32 bit)
 */
static void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
{
        u64 BarOffset;
        u64 dsa;
        int retry = 0;
        u64 rc;
        struct device_node *DevNode =
                xlate_iomm_address(IoAddress, &dsa, &BarOffset);

        if (DevNode == NULL) {
                static unsigned long last_jiffies;
                static int num_printed;

                if ((jiffies - last_jiffies) > 60 * HZ) {
                        last_jiffies = jiffies;
                        num_printed = 0;
                }
                if (num_printed++ < 10)
                        printk(KERN_ERR "iSeries_Write_Byte: invalid access at IO address %p\n", IoAddress);
                return;
        }
        do {
                rc = HvCall4(HvCallPciBarStore8, dsa, BarOffset, data, 0);
        } while (CheckReturnCode("WWB", DevNode, &retry, rc) != 0);
}
static void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
{
        u64 BarOffset;
        u64 dsa;
        int retry = 0;
        u64 rc;
        struct device_node *DevNode =
                xlate_iomm_address(IoAddress, &dsa, &BarOffset);

        if (DevNode == NULL) {
                static unsigned long last_jiffies;
                static int num_printed;

                if ((jiffies - last_jiffies) > 60 * HZ) {
                        last_jiffies = jiffies;
                        num_printed = 0;
                }
                if (num_printed++ < 10)
                        printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n", IoAddress);
                return;
        }
        do {
                rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0);
        } while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0);
}
static void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
{
        u64 BarOffset;
        u64 dsa;
        int retry = 0;
        u64 rc;
        struct device_node *DevNode =
                xlate_iomm_address(IoAddress, &dsa, &BarOffset);

        if (DevNode == NULL) {
                static unsigned long last_jiffies;
                static int num_printed;

                if ((jiffies - last_jiffies) > 60 * HZ) {
                        last_jiffies = jiffies;
                        num_printed = 0;
                }
                if (num_printed++ < 10)
                        printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n", IoAddress);
                return;
        }
        do {
                rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0);
        } while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0);
}
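/*
 * Mirror of the read side: 16- and 32-bit stores are swapped with
 * swab16()/swab32() before the hypervisor call, since the callers are
 * the little-endian out_le16()/out_le32() accessors.
 */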
extern unsigned char __raw_readb(const volatile void __iomem *addr)
{
        BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));

        return *(volatile unsigned char __force *)addr;
}
EXPORT_SYMBOL(__raw_readb);

extern unsigned short __raw_readw(const volatile void __iomem *addr)
{
        BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));

        return *(volatile unsigned short __force *)addr;
}
EXPORT_SYMBOL(__raw_readw);

extern unsigned int __raw_readl(const volatile void __iomem *addr)
{
        BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));

        return *(volatile unsigned int __force *)addr;
}
EXPORT_SYMBOL(__raw_readl);

extern unsigned long __raw_readq(const volatile void __iomem *addr)
{
        BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));

        return *(volatile unsigned long __force *)addr;
}
EXPORT_SYMBOL(__raw_readq);

extern void __raw_writeb(unsigned char v, volatile void __iomem *addr)
{
        BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));

        *(volatile unsigned char __force *)addr = v;
}
EXPORT_SYMBOL(__raw_writeb);

extern void __raw_writew(unsigned short v, volatile void __iomem *addr)
{
        BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));

        *(volatile unsigned short __force *)addr = v;
}
EXPORT_SYMBOL(__raw_writew);

extern void __raw_writel(unsigned int v, volatile void __iomem *addr)
{
        BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));

        *(volatile unsigned int __force *)addr = v;
}
EXPORT_SYMBOL(__raw_writel);

extern void __raw_writeq(unsigned long v, volatile void __iomem *addr)
{
        BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));

        *(volatile unsigned long __force *)addr = v;
}
EXPORT_SYMBOL(__raw_writeq);
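/*
 * Raw MMIO is not possible from an iSeries partition: every access has
 * to go through the HvCallPciBar* calls above.  The __raw_* variants
 * therefore BUG() if the iSeries firmware feature is present and
 * otherwise perform a plain dereference.
 */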
int in_8(const volatile unsigned char __iomem *addr)
{
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                return iSeries_Read_Byte(addr);
        return __in_8(addr);
}
EXPORT_SYMBOL(in_8);

void out_8(volatile unsigned char __iomem *addr, int val)
{
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                iSeries_Write_Byte(val, addr);
        else
                __out_8(addr, val);
}
EXPORT_SYMBOL(out_8);

int in_le16(const volatile unsigned short __iomem *addr)
{
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                return iSeries_Read_Word(addr);
        return __in_le16(addr);
}
EXPORT_SYMBOL(in_le16);

int in_be16(const volatile unsigned short __iomem *addr)
{
        BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));

        return __in_be16(addr);
}
EXPORT_SYMBOL(in_be16);

void out_le16(volatile unsigned short __iomem *addr, int val)
{
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                iSeries_Write_Word(val, addr);
        else
                __out_le16(addr, val);
}
EXPORT_SYMBOL(out_le16);

void out_be16(volatile unsigned short __iomem *addr, int val)
{
        BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));

        __out_be16(addr, val);
}
EXPORT_SYMBOL(out_be16);
unsigned in_le32(const volatile unsigned __iomem *addr)
{
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                return iSeries_Read_Long(addr);
        return __in_le32(addr);
}
EXPORT_SYMBOL(in_le32);

unsigned in_be32(const volatile unsigned __iomem *addr)
{
        BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));

        return __in_be32(addr);
}
EXPORT_SYMBOL(in_be32);

void out_le32(volatile unsigned __iomem *addr, int val)
{
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                iSeries_Write_Long(val, addr);
        else
                __out_le32(addr, val);
}
EXPORT_SYMBOL(out_le32);

void out_be32(volatile unsigned __iomem *addr, int val)
{
        BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));

        __out_be32(addr, val);
}
EXPORT_SYMBOL(out_be32);

unsigned long in_le64(const volatile unsigned long __iomem *addr)
{
        BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));

        return __in_le64(addr);
}
EXPORT_SYMBOL(in_le64);

unsigned long in_be64(const volatile unsigned long __iomem *addr)
{
        BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));

        return __in_be64(addr);
}
EXPORT_SYMBOL(in_be64);

void out_le64(volatile unsigned long __iomem *addr, unsigned long val)
{
        BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));

        __out_le64(addr, val);
}
EXPORT_SYMBOL(out_le64);

void out_be64(volatile unsigned long __iomem *addr, unsigned long val)
{
        BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));

        __out_be64(addr, val);
}
EXPORT_SYMBOL(out_be64);
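/*
 * Only the 8-bit and little-endian 16/32-bit accessors have an iSeries
 * path through the hypervisor calls; the big-endian and 64-bit variants
 * are never valid on iSeries (BUG_ON) and just call the normal
 * __in_*/__out_* helpers.
 */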
void memset_io(volatile void __iomem *addr, int c, unsigned long n)
{
        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                volatile char __iomem *d = addr;

                while (n-- > 0) {
                        iSeries_Write_Byte(c, d++);
                }
        } else
                eeh_memset_io(addr, c, n);
}
EXPORT_SYMBOL(memset_io);

void memcpy_fromio(void *dest, const volatile void __iomem *src,
                unsigned long n)
{
        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                char *d = dest;
                const volatile char __iomem *s = src;

                while (n-- > 0) {
                        *d++ = iSeries_Read_Byte(s++);
                }
        } else
                eeh_memcpy_fromio(dest, src, n);
}
EXPORT_SYMBOL(memcpy_fromio);

void memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
{
        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                const char *s = src;
                volatile char __iomem *d = dest;

                while (n-- > 0) {
                        iSeries_Write_Byte(*s++, d++);
                }
        } else
                eeh_memcpy_toio(dest, src, n);
}
EXPORT_SYMBOL(memcpy_toio);