/*
 * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/meminit.h>
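
/*
 * On ia64, region 6 is an uncached identity mapping of all physical
 * memory, so building an uncached mapping is just a matter of OR'ing
 * the physical address with __IA64_UNCACHED_OFFSET; no page tables
 * are needed.
 */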
static inline void __iomem *
__ioremap (unsigned long phys_addr)
{
	return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
}

void __iomem *
ioremap (unsigned long phys_addr, unsigned long size)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset;
	pgprot_t prot;
	u64 attr;
	unsigned long gran_base, gran_size;
	unsigned long page_base;

	/*
	 * For things in kern_memmap, we must use the same attribute
	 * as the rest of the kernel.  For more details, see
	 * Documentation/ia64/aliasing.txt.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);
	else if (attr & EFI_MEMORY_UC)
		return __ioremap(phys_addr);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported for the whole granule, we prefer that.
	 */
	gran_base = GRANULEROUNDDOWN(phys_addr);
	gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * WB is not supported for the whole granule, so we can't use
	 * the region 7 identity mapping.  If we can safely cover the
	 * area with kernel page table mappings, we can use those
	 * instead.
	 */
	page_base = phys_addr & PAGE_MASK;
	size = PAGE_ALIGN(phys_addr + size) - page_base;
	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
		prot = PAGE_KERNEL;

		/*
		 * Mappings have to be page-aligned
		 */
		offset = phys_addr & ~PAGE_MASK;
		phys_addr &= PAGE_MASK;

		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		area->phys_addr = phys_addr;
		addr = (void __iomem *) area->addr;
		if (ioremap_page_range((unsigned long) addr,
				(unsigned long) addr + size, phys_addr, prot)) {
			vunmap((void __force *) addr);
			return NULL;
		}

		return (void __iomem *) (offset + (char __iomem *)addr);
	}
	return __ioremap(phys_addr);
}
EXPORT_SYMBOL(ioremap);
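
/*
 * An uncached mapping may not alias memory that the kernel maps cacheable
 * (see Documentation/ia64/aliasing.txt), so refuse the request if the
 * range is covered by a WB kernel mapping.
 */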
void __iomem *
ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return NULL;

	return __ioremap(phys_addr);
}
EXPORT_SYMBOL(ioremap_nocache);
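
/*
 * Identity-mapped (region 6/7) addresses need no teardown; only mappings
 * built in the vmalloc area (region 5, RGN_GATE) have page tables to remove.
 */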
void
iounmap (volatile void __iomem *addr)
{
	if (REGION_NUMBER(addr) == RGN_GATE)
		vunmap((void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);