#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>
void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
void __kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
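/*
 * A minimal usage sketch (illustrative only, not part of this file): copying
 * a highmem page into a lowmem buffer under a short-lived atomic kmap. It
 * assumes the usual kmap_atomic()/kunmap_atomic() wrappers resolve to the
 * __kmap_atomic()/__kunmap_atomic() below; the helper name and the KM_USER0
 * slot are made up for the example, any free km_type slot would do.
 *
 *	static void copy_page_to_buf(struct page *page, void *dst)
 *	{
 *		void *src = kmap_atomic(page, KM_USER0);
 *
 *		memcpy(dst, src, PAGE_SIZE);	// must not sleep while mapped
 *		kunmap_atomic(src, KM_USER0);
 *	}
 */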
/*
 * We need an array per cpu, and each array has to be cache aligned.
 */
struct kmap_map {
	struct page *page;
	void *vaddr;
	unsigned long pfn;
};

static struct {
	struct kmap_map map[KM_TYPE_NR];
} ____cacheline_aligned_in_smp kmap_atomic_maps[NR_CPUS];
/*
 * Look up the atomic kmap address of @page on this cpu, or NULL if it is
 * not currently mapped.
 */
void *
kmap_atomic_page_address(struct page *page)
{
	int i;

	for (i = 0; i < KM_TYPE_NR; i++)
		if (kmap_atomic_maps[smp_processor_id()].map[i].page == page)
			return kmap_atomic_maps[smp_processor_id()].map[i].vaddr;

	return NULL;
}
void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	unsigned long pfn;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	inc_preempt_count();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	pfn = page_to_pfn(page);
	vaddr = fix_to_virt_noalias(VALIAS_IDX(FIX_KMAP_BEGIN + idx), pfn);
#ifdef CONFIG_DEBUG_HIGHMEM
	if (!pte_none(*(kmap_pte - (virt_to_fix(vaddr) - VALIAS_IDX(FIX_KMAP_BEGIN)))))
		BUG();
#endif
	/*
	 * Vaddr could have been adjusted to avoid virt aliasing,
	 * recalculate the idx from vaddr.
	 */
	set_pte(kmap_pte - (virt_to_fix(vaddr) - VALIAS_IDX(FIX_KMAP_BEGIN)),
		mk_pte(page, kmap_prot));
	local_flush_tlb_one((unsigned long)vaddr);

	kmap_atomic_maps[smp_processor_id()].map[type].page = page;
	kmap_atomic_maps[smp_processor_id()].map[type].vaddr = (void *)vaddr;
	kmap_atomic_maps[smp_processor_id()].map[type].pfn = pfn;

	return (void *)vaddr;
}
void __kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
	unsigned long pfn = kmap_atomic_maps[smp_processor_id()].map[type].pfn;

	if (vaddr < FIXADDR_START) { /* FIXME */
		dec_preempt_count();
		preempt_check_resched();
		return;
	}

	if (vaddr != fix_to_virt_noalias(VALIAS_IDX(FIX_KMAP_BEGIN + idx), pfn))
		BUG();

	/*
	 * Protect against multiple unmaps:
	 * can't cache flush an unmapped page.
	 */
	if (kmap_atomic_maps[smp_processor_id()].map[type].vaddr) {
		kmap_atomic_maps[smp_processor_id()].map[type].page = NULL;
		kmap_atomic_maps[smp_processor_id()].map[type].vaddr = NULL;
		kmap_atomic_maps[smp_processor_id()].map[type].pfn = 0;

		flush_data_cache_page((unsigned long)vaddr);
	}

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * Force other mappings to Oops if they try to access
	 * this pte without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte - (virt_to_fix(vaddr) - VALIAS_IDX(FIX_KMAP_BEGIN)));
	local_flush_tlb_one(vaddr);
#endif

	dec_preempt_count();
	preempt_check_resched();
}
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	inc_preempt_count();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = fix_to_virt_noalias(VALIAS_IDX(FIX_KMAP_BEGIN + idx), pfn);
	/*
	 * Vaddr could have been adjusted to avoid virt aliasing,
	 * recalculate the idx from vaddr.
	 */
	set_pte(kmap_pte - (virt_to_fix(vaddr) - VALIAS_IDX(FIX_KMAP_BEGIN)),
		pfn_pte(pfn, kmap_prot));

	/* There is no struct page for this mapping, record only vaddr and pfn. */
	kmap_atomic_maps[smp_processor_id()].map[type].page = NULL;
	kmap_atomic_maps[smp_processor_id()].map[type].vaddr = (void *)vaddr;
	kmap_atomic_maps[smp_processor_id()].map[type].pfn = pfn;

	flush_tlb_one(vaddr);

	return (void *)vaddr;
}
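/*
 * Usage sketch (illustrative only, not part of this file): reading one word
 * from a physical address that has no struct page behind it, e.g. a firmware
 * or device-owned region. The helper name, the KM_USER0 slot and the u32
 * access width are assumptions for the example.
 *
 *	static u32 peek_phys(unsigned long pa)
 *	{
 *		void *va = kmap_atomic_pfn(pa >> PAGE_SHIFT, KM_USER0);
 *		u32 val = *(u32 *)(va + (pa & ~PAGE_MASK));
 *
 *		kunmap_atomic(va, KM_USER0);
 *		return val;
 *	}
 */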
struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	pte = kmap_pte - (virt_to_fix(vaddr) - VALIAS_IDX(FIX_KMAP_BEGIN));
	return pte_page(*pte);
}
EXPORT_SYMBOL(__kmap);
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);
EXPORT_SYMBOL(__kmap_atomic_to_page);