linux-2.6/mini2440.git: lib/iomap.c
/*
 * Implement the default iomap interfaces
 *
 * (C) Copyright 2004 Linus Torvalds
 */
#include <linux/pci.h>
#include <linux/io.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <linux/module.h>
/*
 * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
 * access or a MMIO access, these functions don't care. The info is
 * encoded in the hardware mapping set up by the mapping functions
 * (or the cookie itself, depending on implementation and hw).
 *
 * The generic routines don't assume any hardware mappings, and just
 * encode the PIO/MMIO as part of the cookie. They coldly assume that
 * the MMIO IO mappings are not in the low address range.
 *
 * Architectures for which this is not true can't use this generic
 * implementation and should do their own copy.
 */

#ifndef HAVE_ARCH_PIO_SIZE
/*
 * We encode the physical PIO addresses (0-0xffff) into the
 * pointer by offsetting them with a constant (0x10000) and
 * assuming that all the low addresses are always PIO. That means
 * we can do some sanity checks on the low bits, and don't
 * need to just take things for granted.
 */
#define PIO_OFFSET	0x10000UL
#define PIO_MASK	0x0ffffUL
#define PIO_RESERVED	0x40000UL
#endif
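/*
 * Worked example (illustration only, not part of the original source),
 * assuming the default constants above:
 *
 *	void __iomem *p = ioport_map(0x3f8, 8);
 *	// p is the cookie (void __iomem *)0x103f8, i.e. 0x3f8 + PIO_OFFSET
 *	u8 lsr = ioread8(p + 5);
 *	// 0x103fd < PIO_RESERVED, and (0x103fd & ~PIO_MASK) == PIO_OFFSET,
 *	// so IO_COND() below masks with PIO_MASK and issues inb(0x3fd).
 *	// A real MMIO cookie from ioremap() lies above PIO_RESERVED and
 *	// takes the is_mmio branch instead.
 */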
/*
 * Ugly macros are a way of life.
 */
#define VERIFY_PIO(port) BUG_ON((port & ~PIO_MASK) != PIO_OFFSET)

#define IO_COND(addr, is_pio, is_mmio) do {			\
	unsigned long port = (unsigned long __force)addr;	\
	if (port < PIO_RESERVED) {				\
		VERIFY_PIO(port);				\
		port &= PIO_MASK;				\
		is_pio;						\
	} else {						\
		is_mmio;					\
	}							\
} while (0)
#ifndef pio_read16be
#define pio_read16be(port) swab16(inw(port))
#define pio_read32be(port) swab32(inl(port))
#endif

#ifndef mmio_read16be
#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr))
#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr))
#endif
unsigned int fastcall ioread8(void __iomem *addr)
{
	IO_COND(addr, return inb(port), return readb(addr));
}
unsigned int fastcall ioread16(void __iomem *addr)
{
	IO_COND(addr, return inw(port), return readw(addr));
}
unsigned int fastcall ioread16be(void __iomem *addr)
{
	IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
}
unsigned int fastcall ioread32(void __iomem *addr)
{
	IO_COND(addr, return inl(port), return readl(addr));
}
unsigned int fastcall ioread32be(void __iomem *addr)
{
	IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
}
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
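/*
 * Usage sketch (not part of the original source): a driver holding a
 * cookie from pci_iomap() or ioport_map() uses the same accessor for
 * either bus type, and the *be variants for big-endian device registers.
 * The register names and the regs cookie are hypothetical.
 *
 *	u32 status  = ioread32(regs + MYDEV_STATUS);	// little-endian reg
 *	u32 counter = ioread32be(regs + MYDEV_COUNT);	// big-endian reg
 *	// With the default macros above, ioread32be() on an MMIO cookie
 *	// becomes be32_to_cpu(__raw_readl(addr)); on a PIO cookie it
 *	// becomes swab32(inl(port)).
 */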
#ifndef pio_write16be
#define pio_write16be(val,port) outw(swab16(val),port)
#define pio_write32be(val,port) outl(swab32(val),port)
#endif

#ifndef mmio_write16be
#define mmio_write16be(val,port) __raw_writew(be16_to_cpu(val),port)
#define mmio_write32be(val,port) __raw_writel(be32_to_cpu(val),port)
#endif

void fastcall iowrite8(u8 val, void __iomem *addr)
{
	IO_COND(addr, outb(val,port), writeb(val, addr));
}
void fastcall iowrite16(u16 val, void __iomem *addr)
{
	IO_COND(addr, outw(val,port), writew(val, addr));
}
void fastcall iowrite16be(u16 val, void __iomem *addr)
{
	IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr));
}
void fastcall iowrite32(u32 val, void __iomem *addr)
{
	IO_COND(addr, outl(val,port), writel(val, addr));
}
void fastcall iowrite32be(u32 val, void __iomem *addr)
{
	IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr));
}
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
/*
 * These are the "repeat MMIO read/write" functions.
 * Note the "__raw" accesses, since we don't want to
 * convert to CPU byte order. We write in "IO byte
 * order" (we also don't have IO barriers).
 */
#ifndef mmio_insb
static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
{
	while (--count >= 0) {
		u8 data = __raw_readb(addr);
		*dst = data;
		dst++;
	}
}
static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
{
	while (--count >= 0) {
		u16 data = __raw_readw(addr);
		*dst = data;
		dst++;
	}
}
static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
{
	while (--count >= 0) {
		u32 data = __raw_readl(addr);
		*dst = data;
		dst++;
	}
}
#endif

#ifndef mmio_outsb
static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
{
	while (--count >= 0) {
		__raw_writeb(*src, addr);
		src++;
	}
}
static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
{
	while (--count >= 0) {
		__raw_writew(*src, addr);
		src++;
	}
}
static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
{
	while (--count >= 0) {
		__raw_writel(*src, addr);
		src++;
	}
}
#endif
void fastcall ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insb(port, dst, count), mmio_insb(addr, dst, count));
}
void fastcall ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insw(port, dst, count), mmio_insw(addr, dst, count));
}
void fastcall ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insl(port, dst, count), mmio_insl(addr, dst, count));
}
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(ioread32_rep);

void fastcall iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count));
}
void fastcall iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count));
}
void fastcall iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsl(port, src, count), mmio_outsl(addr, src, count));
}
EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
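/*
 * Usage sketch (not part of the original source): the _rep helpers are
 * intended for draining or filling a FIFO register at a fixed address,
 * so the cookie is not advanced between accesses.  MYDEV_DATA_FIFO,
 * regs and nwords are hypothetical.
 *
 *	u32 buf[64];
 *	ioread32_rep(regs + MYDEV_DATA_FIFO, buf, ARRAY_SIZE(buf));
 *	...
 *	iowrite32_rep(regs + MYDEV_DATA_FIFO, buf, nwords);
 *
 * As the comment above notes, the MMIO side uses __raw accessors: data
 * moves in IO byte order with no barriers, so callers needing CPU byte
 * order or ordering guarantees must handle that themselves.
 */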
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	if (port > PIO_MASK)
		return NULL;
	return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
}

void ioport_unmap(void __iomem *addr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
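/*
 * Usage sketch (not part of the original source): ioport_map() turns a
 * legacy port-IO range into a cookie so the ioread*() and iowrite*()
 * helpers above can be used uniformly.  The port range is illustrative.
 *
 *	void __iomem *base = ioport_map(0x1f0, 8);
 *	if (!base)
 *		return -ENODEV;
 *	u8 dev_status = ioread8(base + 7);
 *	ioport_unmap(base);
 */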
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	unsigned long start = pci_resource_start(dev, bar);
	unsigned long len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len || !start)
		return NULL;
	if (maxlen && len > maxlen)
		len = maxlen;
	if (flags & IORESOURCE_IO)
		return ioport_map(start, len);
	if (flags & IORESOURCE_MEM) {
		if (flags & IORESOURCE_CACHEABLE)
			return ioremap(start, len);
		return ioremap_nocache(start, len);
	}
	/* What? */
	return NULL;
}

void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
	IO_COND(addr, /* nothing */, iounmap(addr));
}
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
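/*
 * Usage sketch (not part of the original source): a typical probe path
 * maps a BAR regardless of whether it is an IO or memory BAR and tears
 * the mapping down with pci_iounmap().  Error handling is trimmed and
 * the BAR number is illustrative.
 *
 *	void __iomem *regs = pci_iomap(pdev, 0, 0);	// 0 = map whole BAR
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	pci_iounmap(pdev, regs);
 *
 * pci_iounmap() relies on IO_COND(): for a PIO cookie it is a no-op,
 * for an MMIO cookie it calls iounmap().
 */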
#endif /* CONFIG_GENERIC_IOMAP */
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map().  Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
			      unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap().  @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
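/*
 * Usage sketch (not part of the original source): with the managed
 * variant the mapping is released by devres when the driver detaches,
 * so no explicit unmap is needed on the error or remove paths.  dev is
 * assumed to be the probing driver's struct device; the port range is
 * illustrative.
 *
 *	void __iomem *base = devm_ioport_map(dev, 0x378, 4);
 *	if (!base)
 *		return -ENOMEM;
 *	// no matching devm_ioport_unmap() required; it exists only for
 *	// drivers that want to drop the mapping early
 */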
static void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
			   unsigned long size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap);

/**
 * devm_ioremap_nocache - Managed ioremap_nocache()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap_nocache().  Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
				   unsigned long size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap_nocache(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap_nocache);

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap().  @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	iounmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (void *)addr));
}
EXPORT_SYMBOL(devm_iounmap);
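/*
 * Usage sketch (not part of the original source): the managed ioremap
 * variants follow the same pattern.  dev is assumed to be the probing
 * driver's struct device and mem a struct resource the caller already
 * obtained, e.g. from platform_get_resource().
 *
 *	void __iomem *regs = devm_ioremap_nocache(dev, mem->start,
 *						  mem->end - mem->start + 1);
 *	if (!regs)
 *		return -ENOMEM;
 *	// unmapped automatically on driver detach; devm_iounmap() is only
 *	// needed for early teardown
 */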
/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_ROM_RESOURCE

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}
/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @pdev.  If the iomap table doesn't
 * exist and @pdev is managed, it will be allocated.  All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and is guaranteed to succeed once
 * allocated.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap().  Map is automatically unmapped on driver
 * detach.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
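/*
 * Usage sketch (not part of the original source): pcim_iomap() records
 * the mapping in the per-device table, and pcim_iomap_table() gives
 * later code access to it by BAR number.  BAR 0 is illustrative.
 *
 *	if (!pcim_iomap(pdev, 0, 0))
 *		return -ENOMEM;
 *	void __iomem *regs = pcim_iomap_table(pdev)[0];
 *	// all recorded mappings are released by pcim_iomap_release() on
 *	// driver detach
 */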
/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap().  @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);
/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_region;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_iomap;
	}

	return 0;

 err_iomap:
	pcim_iounmap(pdev, iomap[i]);
 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
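/*
 * Usage sketch (not part of the original source): a managed PCI driver
 * commonly requests and maps the BARs it needs in one call and then
 * fetches the cookies from the table; both the regions and the mappings
 * are released automatically on detach.  The BAR mask and driver name
 * are illustrative.
 *
 *	rc = pcim_iomap_regions(pdev, (1 << 0) | (1 << 2), "mydrv");
 *	if (rc)
 *		return rc;
 *	void __iomem *bar0 = pcim_iomap_table(pdev)[0];
 *	void __iomem *bar2 = pcim_iomap_table(pdev)[2];
 */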