/*
 * RT-AC66 3.0.0.4.374.130 core
 * tomato.git: release/src-rt-6.x/linux/linux-2.6/arch/i386/mm/highmem.c
 */
#include <linux/highmem.h>
#include <linux/module.h>
void *kmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}
void kunmap(struct page *page)
{
        if (in_interrupt())
                BUG();
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
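/*
 * A minimal usage sketch (not part of the original file): the sleeping
 * kmap()/kunmap() pair suits longer-lived mappings where the caller may
 * block.  The helper name is hypothetical; it assumes process context and
 * that memset() is reachable (e.g. via <linux/string.h>).
 */
static inline void zero_highmem_page(struct page *page)
{
        void *vaddr = kmap(page);       /* may sleep waiting for a kmap slot */

        memset(vaddr, 0, PAGE_SIZE);
        kunmap(page);
}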
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();

        /* each CPU owns its own block of KM_TYPE_NR fixmap slots */
        idx = type + KM_TYPE_NR*smp_processor_id();
        BUG_ON(!pte_none(*(kmap_pte-idx)));

        if (!PageHighMem(page))
                return page_address(page);

        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, mk_pte(page, prot));
        arch_flush_lazy_mmu_mode();

        return (void *)vaddr;
}
void *kmap_atomic(struct page *page, enum km_type type)
{
        return kmap_atomic_prot(page, type, kmap_prot);
}
void kunmap_atomic(void *kvaddr, enum km_type type)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

        /*
         * Force other mappings to Oops if they'll try to access this pte
         * without first remapping it.  Keeping stale mappings around is a
         * bad idea also, in case the page changes cacheability attributes
         * or becomes a protected page in a hypervisor.
         */
        if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
                kpte_clear_flush(kmap_pte-idx, vaddr);
        else {
#ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr < PAGE_OFFSET);
                BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
        }

        arch_flush_lazy_mmu_mode();
        pagefault_enable();
}
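/*
 * A minimal usage sketch (not part of the original file): copying out of a
 * possibly-highmem page through a short-lived atomic mapping.  The helper
 * name is hypothetical; it assumes offset+len stays within the page and
 * that memcpy() is reachable (e.g. via <linux/string.h>).  No sleeping is
 * allowed between kmap_atomic() and kunmap_atomic().
 */
static inline void copy_from_page_atomic(struct page *page, void *dst,
                                         unsigned int offset, unsigned int len)
{
        char *vaddr = kmap_atomic(page, KM_USER0);

        memcpy(dst, vaddr + offset, len);
        kunmap_atomic(vaddr, KM_USER0);
}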
/* This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        pagefault_disable();

        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
        arch_flush_lazy_mmu_mode();

        return (void *)vaddr;
}
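/*
 * A minimal usage sketch (not part of the original file): reading one word
 * from a physical address with no struct page behind it, e.g. a firmware
 * table.  The helper name is hypothetical; 'phys' need not be page-aligned,
 * but the word is assumed not to straddle a page boundary.
 */
static inline unsigned long read_phys_word(unsigned long phys)
{
        unsigned long *vaddr = kmap_atomic_pfn(phys >> PAGE_SHIFT, KM_USER0);
        unsigned long val = vaddr[(phys & ~PAGE_MASK) / sizeof(unsigned long)];

        kunmap_atomic(vaddr, KM_USER0);
        return val;
}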
struct page *kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        idx = virt_to_fix(vaddr);
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
}
EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);