/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:      Nicolas Pitre
 * Created:     September 8, 2008
 * Copyright:   Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"

void *kmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
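
/*
 * Illustrative sketch, not part of the original file: the typical
 * kmap()/kunmap() calling pattern.  kmap() may sleep waiting for a free
 * pkmap slot, so it is only valid in process context, and the mapping
 * persists until the matching kunmap().  The helper name below is
 * hypothetical, added purely as a usage example.
 */
static inline u8 example_read_first_byte(struct page *page)
{
        u8 *vaddr = kmap(page);         /* may sleep; returns a kernel vaddr */
        u8 val = *vaddr;                /* the highmem page is now addressable */

        kunmap(page);                   /* release the pkmap entry */
        return val;
}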

void *__kmap_atomic(struct page *page)
{
        unsigned int idx;
        unsigned long vaddr;
        void *kmap;
        int type;

        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * There is no cache coherency issue when non-VIVT, so force the
         * dedicated kmap usage for better debugging purposes in that case.
         */
        if (!cache_is_vivt())
                kmap = NULL;
        else
#endif
        kmap = kmap_high_get(page);
        if (kmap)
                return kmap;

        type = kmap_atomic_idx_push();

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * With debugging enabled, kunmap_atomic forces that entry to 0.
         * Make sure it was indeed properly unmapped.
         */
        BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
        set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
        /*
         * When debugging is off, kunmap_atomic leaves the previous mapping
         * in place, so this TLB flush ensures the TLB is updated with the
         * new mapping.
         */
        local_flush_tlb_kernel_page(vaddr);

        return (void *)vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        int idx, type;

        if (kvaddr >= (void *)FIXADDR_START) {
                type = kmap_atomic_idx();
                idx = type + KM_TYPE_NR * smp_processor_id();

                if (cache_is_vivt())
                        __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
                set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
                local_flush_tlb_kernel_page(vaddr);
#else
                (void) idx;  /* to kill a warning */
#endif
                kmap_atomic_idx_pop();
        } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
                /* this address was obtained through kmap_high_get() */
                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
        }
        pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
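
/*
 * Illustrative sketch, not part of the original file: the atomic
 * counterpart.  The kmap_atomic()/kunmap_atomic() wrappers from
 * <linux/highmem.h> land in the __kmap_atomic()/__kunmap_atomic()
 * implementations above; the mapping is a per-CPU fixmap slot, must not
 * sleep, and must be released in LIFO order on the same CPU.  The helper
 * name is hypothetical.
 */
static inline u8 example_read_first_byte_atomic(struct page *page)
{
        u8 *vaddr, val;

        vaddr = kmap_atomic(page);      /* never sleeps; pagefaults disabled */
        val = *vaddr;
        kunmap_atomic(vaddr);           /* pops this CPU's fixmap slot */
        return val;
}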

void *kmap_atomic_pfn(unsigned long pfn)
{
        unsigned long vaddr;
        int idx, type;

        pagefault_disable();

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
        set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
        local_flush_tlb_kernel_page(vaddr);

        return (void *)vaddr;
}
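
/*
 * Illustrative sketch, not part of the original file: kmap_atomic_pfn()
 * serves callers that only have a page frame number, e.g. for memory
 * with no struct page backing it.  The same atomic rules apply, and the
 * mapping is torn down with kunmap_atomic() on the returned address.
 * The helper name is hypothetical.
 */
static inline u32 example_read_first_word_of_pfn(unsigned long pfn)
{
        u32 *vaddr = kmap_atomic_pfn(pfn);      /* map an arbitrary pfn */
        u32 val = *vaddr;

        kunmap_atomic(vaddr);
        return val;
}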

struct page *kmap_atomic_to_page(const void *ptr)
{
        unsigned long vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        pte = TOP_PTE(vaddr);
        return pte_page(*pte);
}
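
/*
 * Illustrative sketch, not part of the original file: a round trip
 * through kmap_atomic_to_page(), which resolves a fixmap address via
 * its PTE and falls back to virt_to_page() for lowmem addresses.  The
 * helper name is hypothetical.
 */
static inline bool example_mapping_matches(struct page *page)
{
        void *vaddr = kmap_atomic(page);
        bool ok = kmap_atomic_to_page(vaddr) == page;

        kunmap_atomic(vaddr);
        return ok;
}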

#ifdef CONFIG_CPU_CACHE_VIPT

#include <linux/percpu.h>

/*
 * The VIVT cache of a highmem page is always flushed before the page
 * is unmapped. Hence unmapped highmem pages need no cache maintenance
 * in that case.
 *
 * However unmapped pages may still be cached with a VIPT cache, and
 * it is not possible to perform cache maintenance on them using physical
 * addresses unfortunately. So we have no choice but to set up a temporary
 * virtual mapping for that purpose.
 *
 * Yet this VIPT cache maintenance may be triggered from DMA support
 * functions which are possibly called from interrupt context. As we don't
 * want to keep interrupts disabled all the time when such maintenance is
 * taking place, we therefore allow for some reentrancy by preserving and
 * restoring the previous fixmap entry before the interrupted context is
 * resumed.  If the reentrancy depth is 0 then there is no need to restore
 * the previous fixmap, and leaving the current one in place allows it to
 * be reused the next time without a TLB flush (common with DMA).
 */

static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);

void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
        unsigned int idx, cpu;
        int *depth;
        unsigned long vaddr, flags;
        pte_t pte, *ptep;

        if (!in_interrupt())
                preempt_disable();

        cpu = smp_processor_id();
        depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);

        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        ptep = TOP_PTE(vaddr);
        pte = mk_pte(page, kmap_prot);

        raw_local_irq_save(flags);
        (*depth)++;
        if (pte_val(*ptep) == pte_val(pte)) {
                *saved_pte = pte;
        } else {
                *saved_pte = *ptep;
                set_pte_ext(ptep, pte, 0);
                local_flush_tlb_kernel_page(vaddr);
        }
        raw_local_irq_restore(flags);

        return (void *)vaddr;
}

void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
        unsigned int idx, cpu = smp_processor_id();
        int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
        unsigned long vaddr, flags;
        pte_t pte, *ptep;

        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        ptep = TOP_PTE(vaddr);
        pte = mk_pte(page, kmap_prot);

        BUG_ON(pte_val(*ptep) != pte_val(pte));
        BUG_ON(*depth <= 0);

        raw_local_irq_save(flags);
        (*depth)--;
        if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
                set_pte_ext(ptep, saved_pte, 0);
                local_flush_tlb_kernel_page(vaddr);
        }
        raw_local_irq_restore(flags);

        if (!in_interrupt())
                preempt_enable();
}
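
/*
 * Illustrative sketch, not part of the original file: the save/restore
 * pattern expected of kmap_high_l1_vipt() callers doing cache
 * maintenance by virtual address.  Because the previous fixmap entry is
 * handed back through saved_pte, this remains safe even if an interrupt
 * maps another page into the same slot in between.  The helper name is
 * hypothetical.
 */
static inline void example_flush_highmem_page(struct page *page)
{
        pte_t saved_pte;
        void *vaddr = kmap_high_l1_vipt(page, &saved_pte);

        __cpuc_flush_dcache_area(vaddr, PAGE_SIZE);     /* flush by vaddr */
        kunmap_high_l1_vipt(page, saved_pte);
}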

#endif  /* CONFIG_CPU_CACHE_VIPT */