x86: unify kmap_atomic_pfn() and iomap_atomic_prot_pfn()
[linux-2.6/mini2440.git] arch/x86/mm/highmem_32.c
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
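
/*
 * Illustrative usage (not part of the original file): the classic
 * kmap()/kunmap() pattern from sleepable process context. The helper
 * name and the fixed copy length are hypothetical.
 */
#if 0
static void example_copy_from_page(struct page *page, void *dst)
{
	char *vaddr = kmap(page);	/* may sleep */

	memcpy(dst, vaddr, PAGE_SIZE);
	kunmap(page);
}
#endif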
/*
 * Warn (a limited number of times) when a km_type slot is used from a
 * context it is not reserved for, or when an IRQ/softirq slot is used
 * while interrupts are still enabled.
 */
static void debug_kmap_atomic_prot(enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	static unsigned warn_count = 10;

	if (unlikely(warn_count == 0))
		return;

	if (unlikely(in_interrupt())) {
		if (in_irq()) {
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		} else if (!irqs_disabled()) {	/* softirq */
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
			    type != KM_SKB_SUNRPC_DATA &&
			    type != KM_SKB_DATA_SOFTIRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		}
	}

	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
	    type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
		if (!irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
		if (irq_count() == 0 && !irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	}
#endif
}
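
/*
 * Illustrative misuse (not part of the original file): if this were
 * called from hardirq context with CONFIG_DEBUG_HIGHMEM enabled,
 * KM_USER0 is not one of the KM_IRQ* slots, so debug_kmap_atomic_prot()
 * above would fire its WARN_ON(); KM_IRQ0/KM_IRQ1 would be the correct
 * slots there.
 */
#if 0
static void example_wrong_slot_in_irq(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);	/* wrong slot in hardirq */

	kunmap_atomic(vaddr, KM_USER0);
}
#endif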
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic_prot(type);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it. Keeping stale mappings around is
	 * also a bad idea, in case the page changes cacheability
	 * attributes or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
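
/*
 * Illustrative usage (not part of the original file): kmap_atomic() from
 * atomic context. No sleeping is allowed between kmap_atomic() and
 * kunmap_atomic(); the helper name is hypothetical and KM_USER0 is one
 * of the per-CPU fixmap slots.
 */
#if 0
static void example_zero_highpage(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);	/* must pass the same km_type */
}
#endif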
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
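
/*
 * Illustrative usage (not part of the original file): temporarily map a
 * page frame that has no struct page, e.g. device memory identified only
 * by its pfn. The helper name is hypothetical.
 */
#if 0
static u32 example_peek_pfn(unsigned long pfn)
{
	u32 *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
	u32 val = *vaddr;

	kunmap_atomic(vaddr, KM_USER0);
	return val;
}
#endif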
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
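
/*
 * Illustrative usage (not part of the original file): kmap_atomic_to_page()
 * round-trips to the struct page given to kmap_atomic(), for both lowmem
 * addresses (via virt_to_page) and fixmap slots (via the kmap pte).
 */
#if 0
static void example_round_trip(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(kmap_atomic_to_page(vaddr) != page);
	kunmap_atomic(vaddr, KM_USER0);
}
#endif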
EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
	totalram_pages += totalhigh_pages;
}