mm: stack based kmap_atomic()
arch/x86/mm/highmem_32.c
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
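
/*
 * Example (not part of the original file): a minimal sketch of how a
 * caller typically uses the sleeping kmap()/kunmap() pair above from
 * process context.  The function name and its page argument are
 * hypothetical.
 */
#if 0	/* illustration only */
static void example_zero_page(struct page *page)
{
	char *vaddr = kmap(page);	/* may sleep, so process context only */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);			/* kunmap() takes the page, not the vaddr */
}
#endif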
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *__kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(__kmap_atomic);
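
/*
 * Example (not part of the original file): a sketch of the atomic mapping
 * pattern described in the comment above.  Callers normally go through the
 * kmap_atomic()/kunmap_atomic() wrappers from <linux/highmem.h>; the
 * function and variable names here are hypothetical.  No sleeping is
 * allowed between map and unmap.
 */
#if 0	/* illustration only */
static void example_copy_from_page(void *dst, struct page *page,
				   size_t off, size_t len)
{
	char *vaddr = kmap_atomic(page);	/* disables pagefaults */

	memcpy(dst, vaddr + off, len);		/* short, non-sleeping work only */
	kunmap_atomic(vaddr);			/* unmap takes the kernel vaddr */
}
#endif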
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
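
/*
 * Example (not part of the original file): a sketch of using
 * kmap_atomic_pfn() to peek at a physical frame that has no struct page
 * (e.g. a reserved or device-owned frame).  The function name, pfn and
 * offset are hypothetical; the mapping is torn down with the regular
 * atomic unmap.
 */
#if 0	/* illustration only */
static u32 example_read_word_at_pfn(unsigned long pfn, unsigned long offset)
{
	char *vaddr = kmap_atomic_pfn(pfn);
	u32 val = *(u32 *)(vaddr + offset);

	kunmap_atomic(vaddr);
	return val;
}
#endif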
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx_pop();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
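
/*
 * Example (not part of the original file): because slots are handed out by
 * kmap_atomic_idx_push()/kmap_atomic_idx_pop(), atomic kmaps nest like a
 * stack and should be released in reverse (LIFO) order.  A sketch of
 * copying between two highmem pages; names are hypothetical.
 */
#if 0	/* illustration only */
static void example_copy_page_atomic(struct page *dst, struct page *src)
{
	char *vdst = kmap_atomic(dst);
	char *vsrc = kmap_atomic(src);

	memcpy(vdst, vsrc, PAGE_SIZE);
	kunmap_atomic(vsrc);	/* most recent mapping first */
	kunmap_atomic(vdst);
}
#endif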
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
EXPORT_SYMBOL(kmap_atomic_to_page);
void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
	totalram_pages += totalhigh_pages;
}