K2.6 patches and update.
[tomato.git] / release / src-rt / linux / linux-2.6 / arch / mips / mm / highmem.c
blob 2feb5b7a631c8839d3da2f6a232b033754736d90

#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>
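
/*
 * Map a (possibly highmem) page into the kernel's permanent kmap area.
 * May sleep, so it must not be called from interrupt or atomic context;
 * any stale TLB entry for the kmap address is flushed before the address
 * is returned to the caller.
 */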
void *__kmap(struct page *page)
{
        void *addr;

        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        addr = kmap_high(page);
        flush_tlb_one((unsigned long)addr);

        return addr;
}

void __kunmap(struct page *page)
{
        if (in_interrupt())
                BUG();
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

/* We need an array per cpu, and each array has to be cache aligned. */
struct kmap_map {
        struct page *page;
        void *vaddr;
        unsigned long pfn;
};

struct {
        struct kmap_map map[KM_TYPE_NR];
} ____cacheline_aligned_in_smp kmap_atomic_maps[NR_CPUS];
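
/*
 * Look up an existing atomic kmap of @page on the current CPU and return
 * its virtual address, or NULL if the page is not mapped in any of this
 * CPU's KM_TYPE_NR slots.
 */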
void *
kmap_atomic_page_address(struct page *page)
{
        int i;

        for (i = 0; i < KM_TYPE_NR; i++)
                if (kmap_atomic_maps[smp_processor_id()].map[i].page == page)
                        return kmap_atomic_maps[smp_processor_id()].map[i].vaddr;

        return (void *)0;
}

void *__kmap_atomic(struct page *page, enum km_type type)
{
        unsigned int idx;
        unsigned long vaddr;
        unsigned long pfn;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        idx = type + KM_TYPE_NR * smp_processor_id();
        pfn = page_to_pfn(page);
        vaddr = fix_to_virt_noalias(VALIAS_IDX(FIX_KMAP_BEGIN + idx), pfn);
#ifdef CONFIG_DEBUG_HIGHMEM
        if (!pte_none(*(kmap_pte - (virt_to_fix(vaddr) - VALIAS_IDX(FIX_KMAP_BEGIN)))))
                BUG();
#endif
        /* Vaddr could have been adjusted to avoid virt aliasing,
         * recalculate the idx from vaddr.
         */
        set_pte(kmap_pte - (virt_to_fix(vaddr) - VALIAS_IDX(FIX_KMAP_BEGIN)),
                mk_pte(page, kmap_prot));
        local_flush_tlb_one((unsigned long)vaddr);

        kmap_atomic_maps[smp_processor_id()].map[type].page = page;
        kmap_atomic_maps[smp_processor_id()].map[type].vaddr = (void *)vaddr;
        kmap_atomic_maps[smp_processor_id()].map[type].pfn = pfn;

        return (void *)vaddr;
}
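
/*
 * Tear down an atomic kmap set up by __kmap_atomic(): flush the data cache
 * for the aliased mapping, clear the per-CPU slot and re-enable pagefaults.
 * Addresses below FIXADDR_START (lowmem pages) only re-enable pagefaults.
 */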
void __kunmap_atomic(void *kvaddr, enum km_type type)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
        unsigned long pfn = kmap_atomic_maps[smp_processor_id()].map[type].pfn;

        if (vaddr < FIXADDR_START) { /* FIXME */
                pagefault_enable();
                return;
        }

        if (vaddr != fix_to_virt_noalias(VALIAS_IDX(FIX_KMAP_BEGIN + idx), pfn))
                BUG();

        /*
         * Protect against multiple unmaps.
         * Can't cache flush an unmapped page.
         */
        if (kmap_atomic_maps[smp_processor_id()].map[type].vaddr) {
                kmap_atomic_maps[smp_processor_id()].map[type].page = (struct page *)0;
                kmap_atomic_maps[smp_processor_id()].map[type].vaddr = (void *)0;
                kmap_atomic_maps[smp_processor_id()].map[type].pfn = 0;

                flush_data_cache_page((unsigned long)vaddr);
        }

#ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * Force other mappings to Oops if they try to access this pte
         * without first remapping it.
         */
        pte_clear(&init_mm, vaddr, kmap_pte - (virt_to_fix(vaddr) - VALIAS_IDX(FIX_KMAP_BEGIN)));
        local_flush_tlb_one(vaddr);
#endif

        pagefault_enable();
}

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
        unsigned int idx;
        unsigned long vaddr;

        pagefault_disable();

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = fix_to_virt_noalias(VALIAS_IDX(FIX_KMAP_BEGIN + idx), pfn);
        /* Vaddr could have been adjusted to avoid virt aliasing,
         * recalculate the idx from vaddr.
         */
        set_pte(kmap_pte - (virt_to_fix(vaddr) - VALIAS_IDX(FIX_KMAP_BEGIN)),
                pfn_pte(pfn, kmap_prot));

        kmap_atomic_maps[smp_processor_id()].map[type].page = (struct page *)0;
        kmap_atomic_maps[smp_processor_id()].map[type].vaddr = (void *)vaddr;
        kmap_atomic_maps[smp_processor_id()].map[type].pfn = pfn;

        flush_tlb_one(vaddr);

        return (void *)vaddr;
}
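
/*
 * Translate an address obtained from an atomic kmap back to its struct page
 * by reading the corresponding fixmap pte; lowmem pointers are handled with
 * virt_to_page() directly.
 */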
struct page *__kmap_atomic_to_page(void *ptr)
{
        unsigned long vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        pte = kmap_pte - (virt_to_fix(vaddr) - VALIAS_IDX(FIX_KMAP_BEGIN));
        return pte_page(*pte);
}

EXPORT_SYMBOL(__kmap);
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);
EXPORT_SYMBOL(__kmap_atomic_to_page);
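
For reference, a minimal caller sketch (not part of the file above) showing how
the __kmap_atomic()/__kunmap_atomic() pair defined here is typically used: the
page is mapped into a per-CPU fixmap slot, accessed without sleeping, and then
unmapped. The helper name copy_highpage_to_buf and its buffer argument are
hypothetical; KM_USER0 is one of the standard km_type slots, and the usual
<linux/highmem.h> and <linux/string.h> headers are assumed.

/* Copy one (possibly highmem) page into a kernel buffer via an atomic kmap. */
static void copy_highpage_to_buf(struct page *page, void *buf)
{
        void *vaddr;

        /* Maps the page into this CPU's KM_USER0 slot; page faults are
         * disabled until the matching __kunmap_atomic() call. */
        vaddr = __kmap_atomic(page, KM_USER0);

        /* No sleeping is allowed while the atomic mapping is held. */
        memcpy(buf, vaddr, PAGE_SIZE);

        /* Flushes the aliased cache lines and re-enables page faults. */
        __kunmap_atomic(vaddr, KM_USER0);
}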