// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;
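
/* Allow huge I/O mappings to be disabled from the kernel command line. */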
static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}
static inline int ioremap_p4d_enabled(void)
{
	return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
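
/*
 * The helpers below walk the kernel page tables top-down
 * (pgd -> p4d -> pud -> pmd -> pte), allocating intermediate levels as
 * needed and installing huge mappings where the virtual range and the
 * physical address are suitably sized and aligned for that level.
 */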
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}
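
/*
 * PMD level: take a huge (PMD-sized) mapping when the remaining range
 * covers a whole PMD and the physical address is PMD-aligned; otherwise
 * fall back to mapping individual PTEs.
 */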
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (ioremap_pmd_enabled() &&
		    ((next - addr) == PMD_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
		    pmd_free_pte_page(pmd, addr)) {
			if (pmd_set_huge(pmd, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}
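
/*
 * PUD level: same idea one level up; a PUD-sized huge mapping is used
 * when the range and the physical address are PUD-aligned.
 */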
static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	phys_addr -= addr;
	pud = pud_alloc(&init_mm, p4d, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (ioremap_pud_enabled() &&
		    ((next - addr) == PUD_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
		    pud_free_pmd_page(pud, addr)) {
			if (pud_set_huge(pud, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}
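
/*
 * P4D level: same pattern again.  Note that ioremap_p4d_capable is
 * never set in ioremap_huge_init() above, so this huge path is
 * effectively disabled and the walk always recurses into the PUDs.
 */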
static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	p4d_t *p4d;
	unsigned long next;

	phys_addr -= addr;
	p4d = p4d_alloc(&init_mm, pgd, addr);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (ioremap_p4d_enabled() &&
		    ((next - addr) == P4D_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
			if (p4d_set_huge(p4d, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}
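
/*
 * ioremap_page_range - map a physical address range into the kernel
 * page tables with the given protection bits.  The caller is expected
 * to have reserved the virtual area beforehand (e.g. with
 * get_vm_area()).
 */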
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	flush_cache_vmap(start, end);

	return err;
}
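
/*
 * Usage sketch (not part of this file): an architecture's ioremap()
 * typically reserves a VM_IOREMAP area in vmalloc space and then calls
 * ioremap_page_range() to populate it, roughly:
 *
 *	struct vm_struct *area;
 *	unsigned long vaddr;
 *
 *	area = get_vm_area(size, VM_IOREMAP);
 *	if (!area)
 *		return NULL;
 *	vaddr = (unsigned long)area->addr;
 *	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
 *		free_vm_area(area);
 *		return NULL;
 *	}
 *	return (void __iomem *)(vaddr + offset_in_page(phys_addr));
 *
 * where size and phys_addr are assumed to have been page-aligned by
 * the caller first.
 */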