/*
 * Implement the default iomap interfaces
 *
 * (C) Copyright 2004 Linus Torvalds
 */
#include <linux/pci.h>
#include <linux/io.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <linux/module.h>
/*
 * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
 * access or a MMIO access; these functions don't care. The info is
 * encoded in the hardware mapping set up by the mapping functions
 * (or the cookie itself, depending on implementation and hw).
 *
 * The generic routines don't assume any hardware mappings, and just
 * encode the PIO/MMIO as part of the cookie. They coldly assume that
 * the MMIO IO mappings are not in the low address range.
 *
 * Architectures for which this is not true can't use this generic
 * implementation and should do their own copy.
 */
#ifndef HAVE_ARCH_PIO_SIZE
/*
 * We encode the physical PIO addresses (0-0xffff) into the
 * pointer by offsetting them with a constant (0x10000) and
 * assuming that all the low addresses are always PIO. That means
 * we can do some sanity checks on the low bits, and don't
 * need to just take things for granted.
 */
#define PIO_OFFSET	0x10000UL
#define PIO_MASK	0x0ffffUL
#define PIO_RESERVED	0x40000UL
#endif
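
/*
 * Illustrative sketch (not part of the original file): with the
 * defaults above, a legacy UART at I/O port 0x3f8 is encoded as the
 * cookie 0x103f8, while any genuine MMIO pointer is assumed to lie at
 * or above PIO_RESERVED:
 *
 *	void __iomem *cookie = ioport_map(0x3f8, 8);
 *	// cookie == (void __iomem *)(0x3f8 + PIO_OFFSET) == 0x103f8
 *	// decoding: 0x103f8 & PIO_MASK == 0x3f8, the original port
 */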
/*
 * Ugly macros are a way of life.
 */
#define VERIFY_PIO(port) BUG_ON((port & ~PIO_MASK) != PIO_OFFSET)

#define IO_COND(addr, is_pio, is_mmio) do {			\
	unsigned long port = (unsigned long __force)addr;	\
	if (port < PIO_RESERVED) {				\
		VERIFY_PIO(port);				\
		port &= PIO_MASK;				\
		is_pio;						\
	} else {						\
		is_mmio;					\
	}							\
} while (0)
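
/*
 * Illustrative sketch: for a cookie, IO_COND() in ioread8() below
 * effectively expands to
 *
 *	unsigned long port = (unsigned long __force)addr;
 *	if (port < PIO_RESERVED) {
 *		VERIFY_PIO(port);
 *		port &= PIO_MASK;
 *		return inb(port);
 *	} else {
 *		return readb(addr);
 *	}
 */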
#ifndef pio_read16be
#define pio_read16be(port) swab16(inw(port))
#define pio_read32be(port) swab32(inl(port))
#endif

#ifndef mmio_read16be
#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr))
#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr))
#endif
unsigned int fastcall ioread8(void __iomem *addr)
{
	IO_COND(addr, return inb(port), return readb(addr));
}
unsigned int fastcall ioread16(void __iomem *addr)
{
	IO_COND(addr, return inw(port), return readw(addr));
}
unsigned int fastcall ioread16be(void __iomem *addr)
{
	IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
}
unsigned int fastcall ioread32(void __iomem *addr)
{
	IO_COND(addr, return inl(port), return readl(addr));
}
unsigned int fastcall ioread32be(void __iomem *addr)
{
	IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
}
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
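
/*
 * Illustrative sketch of a caller (the FOO_* register offsets are made
 * up): the same accessor works whether "regs" came from ioport_map() or
 * from pci_iomap()/MMIO, and the "be" variants handle registers that
 * are defined as big-endian regardless of CPU endianness:
 *
 *	u32 status = ioread32(regs + FOO_STATUS);
 *	u16 count  = ioread16be(regs + FOO_BE_COUNT);
 */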
#ifndef pio_write16be
#define pio_write16be(val,port) outw(swab16(val),port)
#define pio_write32be(val,port) outl(swab32(val),port)
#endif

#ifndef mmio_write16be
#define mmio_write16be(val,port) __raw_writew(cpu_to_be16(val),port)
#define mmio_write32be(val,port) __raw_writel(cpu_to_be32(val),port)
#endif
void fastcall iowrite8(u8 val, void __iomem *addr)
{
	IO_COND(addr, outb(val,port), writeb(val, addr));
}
void fastcall iowrite16(u16 val, void __iomem *addr)
{
	IO_COND(addr, outw(val,port), writew(val, addr));
}
void fastcall iowrite16be(u16 val, void __iomem *addr)
{
	IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr));
}
void fastcall iowrite32(u32 val, void __iomem *addr)
{
	IO_COND(addr, outl(val,port), writel(val, addr));
}
void fastcall iowrite32be(u32 val, void __iomem *addr)
{
	IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr));
}
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
/*
 * These are the "repeat MMIO read/write" functions.
 * Note the "__raw" accesses, since we don't want to
 * convert to CPU byte order. We write in "IO byte
 * order" (we also don't have IO barriers).
 */
#ifndef mmio_insb
static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
{
	while (--count >= 0) {
		u8 data = __raw_readb(addr);
		*dst = data;
		dst++;
	}
}
static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
{
	while (--count >= 0) {
		u16 data = __raw_readw(addr);
		*dst = data;
		dst++;
	}
}
static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
{
	while (--count >= 0) {
		u32 data = __raw_readl(addr);
		*dst = data;
		dst++;
	}
}
#endif

#ifndef mmio_outsb
static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
{
	while (--count >= 0) {
		__raw_writeb(*src, addr);
		src++;
	}
}
static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
{
	while (--count >= 0) {
		__raw_writew(*src, addr);
		src++;
	}
}
static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
{
	while (--count >= 0) {
		__raw_writel(*src, addr);
		src++;
	}
}
#endif
void fastcall ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count));
}
void fastcall ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count));
}
void fastcall ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count));
}
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(ioread32_rep);
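
/*
 * Illustrative sketch: draining a 16-bit data FIFO into a buffer with
 * the repeating reader (FOO_DATA_FIFO is a made-up register offset).
 * The same cookie address is read "count" times, and the data lands in
 * the buffer in IO byte order, exactly as the device presents it:
 *
 *	u16 buf[64];
 *	ioread16_rep(regs + FOO_DATA_FIFO, buf, ARRAY_SIZE(buf));
 */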
void fastcall iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count));
}
void fastcall iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count));
}
void fastcall iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsl(port, src, count), mmio_outsl(addr, src, count));
}
EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	if (port > PIO_MASK)
		return NULL;
	return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
}

void ioport_unmap(void __iomem *addr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
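
/*
 * Illustrative sketch: mapping the eight legacy UART ports at 0x3f8
 * and accessing them through the returned cookie (offset 5 is the
 * 16550 line status register):
 *
 *	void __iomem *uart = ioport_map(0x3f8, 8);
 *	if (uart) {
 *		u8 lsr = ioread8(uart + 5);
 *		ioport_unmap(uart);
 *	}
 */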
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	unsigned long start = pci_resource_start(dev, bar);
	unsigned long len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len || !start)
		return NULL;
	if (maxlen && len > maxlen)
		len = maxlen;
	if (flags & IORESOURCE_IO)
		return ioport_map(start, len);
	if (flags & IORESOURCE_MEM) {
		if (flags & IORESOURCE_CACHEABLE)
			return ioremap(start, len);
		return ioremap_nocache(start, len);
	}
	/* What? */
	return NULL;
}

void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
	IO_COND(addr, /* nothing */, iounmap(addr));
}
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
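
/*
 * Illustrative sketch of a probe() path (the BAR number and FOO_RESET
 * offset are made up): the driver need not care whether BAR 0 is an
 * I/O port BAR or a memory BAR, since the returned cookie works with
 * the ioread/iowrite accessors either way. A maxlen of 0 maps the
 * whole BAR.
 *
 *	void __iomem *regs = pci_iomap(pdev, 0, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	iowrite32(1, regs + FOO_RESET);
 *	...
 *	pci_iounmap(pdev, regs);
 */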
#endif /* CONFIG_GENERIC_IOMAP */
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}
/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
			      unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
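
/*
 * Illustrative sketch (port and range are made up): with the managed
 * variant, the error and detach paths need no explicit unmap, because
 * devres releases the mapping when the driver detaches:
 *
 *	void __iomem *base = devm_ioport_map(dev, 0x170, 8);
 *	if (!base)
 *		return -ENOMEM;
 */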
/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap(). @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
static void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}
/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
			   unsigned long size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap);
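
/*
 * Illustrative sketch of a platform driver probe() (here "pdev" is a
 * struct platform_device, and error handling is abbreviated):
 *
 *	struct resource *res;
 *	void __iomem *base;
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	if (!res)
 *		return -ENODEV;
 *	base = devm_ioremap(&pdev->dev, res->start,
 *			    res->end - res->start + 1);
 *	if (!base)
 *		return -ENOMEM;
 */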
/**
 * devm_ioremap_nocache - Managed ioremap_nocache()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap_nocache(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
				   unsigned long size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap_nocache(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap_nocache);
/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	iounmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (void *)addr));
}
EXPORT_SYMBOL(devm_iounmap);
/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_ROM_RESOURCE

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}
/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @pdev. If the iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and is guaranteed to succeed once
 * allocated.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
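
/*
 * Illustrative sketch: once pcim_iomap() has populated an entry, the
 * table gives cheap repeat access to the cookie by BAR number:
 *
 *	void __iomem * const *tbl = pcim_iomap_table(pdev);
 *	void __iomem *bar0 = tbl ? tbl[0] : NULL;
 */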
/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);
/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
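
/*
 * Illustrative sketch of a fully managed PCI probe() (the BAR mask and
 * DRV_NAME are made up): one call requests and maps BARs 0 and 2, and
 * both the regions and the mappings are released automatically on
 * driver detach.
 *
 *	int rc = pcim_iomap_regions(pdev, (1 << 0) | (1 << 2), DRV_NAME);
 *	if (rc)
 *		return rc;
 *	regs = pcim_iomap_table(pdev)[0];
 */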