/*
 * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/io.h>
#include <asm/meminit.h>

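/*
 * Map a physical address into the uncached identity-mapped kernel
 * region by OR-ing in __IA64_UNCACHED_OFFSET.  No page tables are
 * involved, so this works at any point during boot.
 */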
static inline void __iomem *
__ioremap (unsigned long phys_addr)
{
	return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
}

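/*
 * Early boot variant: the vmalloc machinery used by ioremap() below is
 * not available yet, so always hand back an uncached mapping.
 */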
void __iomem *
early_ioremap (unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr);
}

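/*
 * General ioremap(): choose a mapping that is consistent with the EFI
 * memory attributes for the region.  Prefer the cached (WB) identity
 * mapping when the firmware allows it, fall back to page-table
 * mappings for partial granules, and use the uncached identity
 * mapping otherwise.
 */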
void __iomem *
ioremap (unsigned long phys_addr, unsigned long size)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset;
	pgprot_t prot;
	u64 attr;
	unsigned long gran_base, gran_size;
	unsigned long page_base;

	/*
	 * For things in kern_memmap, we must use the same attribute
	 * as the rest of the kernel.  For more details, see
	 * Documentation/ia64/aliasing.txt.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);
	else if (attr & EFI_MEMORY_UC)
		return __ioremap(phys_addr);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported for the whole granule, we prefer that.
	 */
	gran_base = GRANULEROUNDDOWN(phys_addr);
	gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * WB is not supported for the whole granule, so we can't use
	 * the region 7 identity mapping.  If we can safely cover the
	 * area with kernel page table mappings, we can use those
	 * instead.
	 */
	page_base = phys_addr & PAGE_MASK;
	size = PAGE_ALIGN(phys_addr + size) - page_base;
	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
		prot = PAGE_KERNEL;

		/*
		 * Mappings have to be page-aligned
		 */
		offset = phys_addr & ~PAGE_MASK;
		phys_addr &= PAGE_MASK;

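		/*
		 * Grab a chunk of vmalloc space and map it to the
		 * page-aligned physical range with kernel (WB)
		 * protections.
		 */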
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		area->phys_addr = phys_addr;
		addr = (void __iomem *) area->addr;
		if (ioremap_page_range((unsigned long) addr,
				(unsigned long) addr + size, phys_addr, prot)) {
			vunmap((void __force *) addr);
			return NULL;
		}

		return (void __iomem *) (offset + (char __iomem *)addr);
	}

	return __ioremap(phys_addr);
}
EXPORT_SYMBOL(ioremap);

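/*
 * ioremap_nocache() must not create an uncached alias of memory that
 * the kernel already maps write-back (see
 * Documentation/ia64/aliasing.txt), so such requests fail; everything
 * else gets the uncached identity mapping.
 */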
void __iomem *
ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return NULL;

	return __ioremap(phys_addr);
}
EXPORT_SYMBOL(ioremap_nocache);

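/*
 * early_ioremap() never allocates anything, so there is nothing to
 * tear down here.
 */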
void
early_iounmap (volatile void __iomem *addr, unsigned long size)
{
}

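/*
 * Only mappings built with page tables live in the vmalloc/gate region
 * and need to be unmapped; identity-mapped addresses are left alone.
 */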
void
iounmap (volatile void __iomem *addr)
{
	if (REGION_NUMBER(addr) == RGN_GATE)
		vunmap((void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);