/* arch/mips/mm/highmem.c */
#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

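/*
 * Map a (possibly highmem) page into the kernel's address space.
 * May sleep, so this is only valid in process context; lowmem pages
 * are returned directly via page_address().
 */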
void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(__kmap);

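/*
 * Drop a mapping established by __kmap().  Must not be called from
 * interrupt context; lowmem pages need no teardown.
 */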
void __kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(__kunmap);

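/*
 * Illustrative sketch (not part of this file): copying out of a
 * highmem page with the sleeping kmap interface.  "page" and "buf"
 * are hypothetical caller-provided names.
 *
 *	char *vaddr = kmap(page);
 *	memcpy(buf, vaddr, PAGE_SIZE);
 *	kunmap(page);
 */
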
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
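
/*
 * Illustrative sketch (not part of this file): zeroing a highmem page
 * through an atomic kmap slot.  The KM_USER0 slot is an assumption
 * made for the example; nothing that can sleep may run while the
 * mapping is held.
 *
 *	void *vto = kmap_atomic(page, KM_USER0);
 *	memset(vto, 0, PAGE_SIZE);
 *	kunmap_atomic(vto, KM_USER0);
 */
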
void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);

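/*
 * Tear down an atomic kmap.  With CONFIG_DEBUG_HIGHMEM the slot's pte
 * is cleared and its TLB entry flushed so stale accesses fault;
 * otherwise the slot is left in place and simply reused later.
 */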
void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte - idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}

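/*
 * Translate a kmap_atomic() address back to the struct page it maps.
 * Addresses below the fixmap area are taken to be lowmem and resolved
 * with virt_to_page(); otherwise the cached kmap pte array is used.
 */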
struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

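/*
 * Called once at boot: locate the pte backing the first kmap fixmap
 * slot, so that all atomic kmap slots can be addressed relative to it.
 */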
void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}