/*
 * Implement the default iomap interfaces
 *
 * (C) Copyright 2004 Linus Torvalds
 */
#include <linux/pci.h>
#include <linux/io.h>

#include <linux/module.h>
/*
 * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
 * access or an MMIO access; these functions don't care. The info is
 * encoded in the hardware mapping set up by the mapping functions
 * (or the cookie itself, depending on implementation and hw).
 *
 * The generic routines don't assume any hardware mappings, and just
 * encode the PIO/MMIO as part of the cookie. They coldly assume that
 * the MMIO IO mappings are not in the low address range.
 *
 * Architectures for which this is not true can't use this generic
 * implementation and should do their own copy.
 */
#ifndef HAVE_ARCH_PIO_SIZE
/*
 * We encode the physical PIO addresses (0-0xffff) into the
 * pointer by offsetting them with a constant (0x10000) and
 * assuming that all the low addresses are always PIO. That means
 * we can do some sanity checks on the low bits, and don't
 * need to just take things for granted.
 */
#define PIO_OFFSET	0x10000UL
#define PIO_MASK	0x0ffffUL
#define PIO_RESERVED	0x40000UL
#endif
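
/*
 * Worked example of the encoding above (illustration only, not part of
 * the original file).  A driver that maps the legacy serial port range:
 *
 *	void __iomem *uart = ioport_map(0x3f8, 8);
 *
 * gets back the cookie 0x3f8 + PIO_OFFSET = 0x103f8.  That value lies
 * between PIO_OFFSET and PIO_RESERVED, so ioread8(uart + 5) takes the
 * PIO branch of IO_COND() below and ends up doing inb(0x103fd & PIO_MASK),
 * i.e. inb(0x3fd).  A pointer returned by ioremap() is assumed to be
 * at or above PIO_RESERVED and takes the MMIO branch instead.
 */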
static void bad_io_access(unsigned long port, const char *access)
{
	static int count = 10;

	/* Complain, but at most ten times, about a bogus cookie */
	if (count) {
		count--;
		printk(KERN_ERR "Bad IO access at port %lx (%s)\n", port, access);
		WARN_ON(1);
	}
}
/*
 * Ugly macros are a way of life.
 */
#define IO_COND(addr, is_pio, is_mmio) do {			\
	unsigned long port = (unsigned long __force)addr;	\
	if (port >= PIO_RESERVED) {				\
		is_mmio;					\
	} else if (port > PIO_OFFSET) {				\
		port &= PIO_MASK;				\
		is_pio;						\
	} else							\
		bad_io_access(port, #is_pio);			\
} while (0)
#ifndef pio_read16be
#define pio_read16be(port) swab16(inw(port))
#define pio_read32be(port) swab32(inl(port))
#endif

#ifndef mmio_read16be
#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr))
#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr))
#endif
unsigned int fastcall ioread8(void __iomem *addr)
{
	IO_COND(addr, return inb(port), return readb(addr));
}

unsigned int fastcall ioread16(void __iomem *addr)
{
	IO_COND(addr, return inw(port), return readw(addr));
}

unsigned int fastcall ioread16be(void __iomem *addr)
{
	IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
}

unsigned int fastcall ioread32(void __iomem *addr)
{
	IO_COND(addr, return inl(port), return readl(addr));
}

unsigned int fastcall ioread32be(void __iomem *addr)
{
	IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
}

EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
#ifndef pio_write16be
#define pio_write16be(val, port) outw(swab16(val), port)
#define pio_write32be(val, port) outl(swab32(val), port)
#endif

#ifndef mmio_write16be
#define mmio_write16be(val, port) __raw_writew(be16_to_cpu(val), port)
#define mmio_write32be(val, port) __raw_writel(be32_to_cpu(val), port)
#endif
void fastcall iowrite8(u8 val, void __iomem *addr)
{
	IO_COND(addr, outb(val, port), writeb(val, addr));
}

void fastcall iowrite16(u16 val, void __iomem *addr)
{
	IO_COND(addr, outw(val, port), writew(val, addr));
}

void fastcall iowrite16be(u16 val, void __iomem *addr)
{
	IO_COND(addr, pio_write16be(val, port), mmio_write16be(val, addr));
}

void fastcall iowrite32(u32 val, void __iomem *addr)
{
	IO_COND(addr, outl(val, port), writel(val, addr));
}

void fastcall iowrite32be(u32 val, void __iomem *addr)
{
	IO_COND(addr, pio_write32be(val, port), mmio_write32be(val, addr));
}

EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
/*
 * These are the "repeat MMIO read/write" functions.
 * Note the "__raw" accesses, since we don't want to
 * convert to CPU byte order. We write in "IO byte
 * order" (we also don't have IO barriers).
 */
#ifndef mmio_insb
static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
{
	while (--count >= 0) {
		u8 data = __raw_readb(addr);
		*dst = data;
		dst++;
	}
}

static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
{
	while (--count >= 0) {
		u16 data = __raw_readw(addr);
		*dst = data;
		dst++;
	}
}

static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
{
	while (--count >= 0) {
		u32 data = __raw_readl(addr);
		*dst = data;
		dst++;
	}
}
#endif
#ifndef mmio_outsb
static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
{
	while (--count >= 0) {
		__raw_writeb(*src, addr);
		src++;
	}
}

static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
{
	while (--count >= 0) {
		__raw_writew(*src, addr);
		src++;
	}
}

static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
{
	while (--count >= 0) {
		__raw_writel(*src, addr);
		src++;
	}
}
#endif
void fastcall ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insb(port, dst, count), mmio_insb(addr, dst, count));
}

void fastcall ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insw(port, dst, count), mmio_insw(addr, dst, count));
}

void fastcall ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insl(port, dst, count), mmio_insl(addr, dst, count));
}

EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(ioread32_rep);
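
/*
 * Usage sketch (illustration only; "regs" and FIFO_REG are hypothetical
 * driver names, not part of this file): draining a 32-bit data FIFO
 * register into a buffer looks like
 *
 *	u32 buf[64];
 *	ioread32_rep(regs + FIFO_REG, buf, ARRAY_SIZE(buf));
 *
 * The data lands in "IO byte order", exactly as the device presented it,
 * because the repeat helpers above deliberately skip byte swapping.
 */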
void fastcall iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count));
}

void fastcall iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count));
}

void fastcall iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsl(port, src, count), mmio_outsl(addr, src, count));
}

EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	if (port > PIO_MASK)
		return NULL;
	return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
}

void ioport_unmap(void __iomem *addr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	unsigned long start = pci_resource_start(dev, bar);
	unsigned long len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len || !start)
		return NULL;
	if (maxlen && len > maxlen)
		len = maxlen;
	if (flags & IORESOURCE_IO)
		return ioport_map(start, len);
	if (flags & IORESOURCE_MEM) {
		if (flags & IORESOURCE_CACHEABLE)
			return ioremap(start, len);
		return ioremap_nocache(start, len);
	}
	/* Neither IO nor MEM - bail out */
	return NULL;
}
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	IO_COND(addr, /* nothing */, iounmap(addr));
}
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
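
/*
 * Usage sketch (illustration only; the MYDEV_* register offsets and bit
 * names are hypothetical, not part of this file): a PCI driver maps
 * BAR 0 and then uses the ioreadN/iowriteN accessors above, which work
 * the same way whether the BAR turned out to be I/O-port or memory space:
 *
 *	void __iomem *regs = pci_iomap(pdev, 0, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	iowrite32(MYDEV_CTRL_ENABLE, regs + MYDEV_CTRL);
 *	status = ioread32(regs + MYDEV_STATUS);
 *	pci_iounmap(pdev, regs);
 */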