x86: set_highmem_pages_init() cleanup, fix !CONFIG_NUMA && CONFIG_HIGHMEM=y
[linux-2.6/mini2440.git] / arch/x86/mm/highmem_32.c
blob 00f127c80b0e38fa50eec531e8e85c8d1f18fd6d
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

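/*
 * Illustrative usage sketch (not part of the original file): a
 * hypothetical helper showing the sleepable kmap()/kunmap() pair.
 * kmap() may sleep, so it is only valid in process context; the
 * mapping persists until kunmap() and may be held across blocking
 * operations, unlike an atomic kmap.
 *
 *	static void copy_into_page(struct page *page, const void *src,
 *				   size_t len)
 *	{
 *		char *vaddr = kmap(page);
 *
 *		memcpy(vaddr, src, len);
 *		kunmap(page);
 *	}
 */
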
/*
 * Warn (at most warn_count times) when an atomic kmap slot is used
 * from a context it is not meant for, e.g. a KM_IRQ* slot outside of
 * hardirq context, or any non-interrupt slot from interrupt context.
 */
static void debug_kmap_atomic_prot(enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	static unsigned warn_count = 10;

	if (unlikely(warn_count == 0))
		return;

	if (unlikely(in_interrupt())) {
		if (in_irq()) {
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		} else if (!irqs_disabled()) {	/* softirq */
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
			    type != KM_SKB_SUNRPC_DATA &&
			    type != KM_SKB_DATA_SOFTIRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		}
	}

	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
	    type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
		if (!irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
		if (irq_count() == 0 && !irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	}
#endif
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic_prot(type);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.  Keeping stale mappings around is
	 * a bad idea also, in case the page changes cacheability
	 * attributes or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}

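/*
 * Illustrative usage sketch (not part of the original file): a
 * hypothetical helper using the KM_USER0 slot. Nothing between
 * kmap_atomic() and kunmap_atomic() may sleep, and both calls must use
 * the same km_type slot; preemption is disabled in between, so the pair
 * runs on one CPU and the per-cpu fixmap slot stays valid.
 *
 *	static void zero_page_atomic(struct page *page)
 *	{
 *		void *vaddr = kmap_atomic(page, KM_USER0);
 *
 *		memset(vaddr, 0, PAGE_SIZE);
 *		kunmap_atomic(vaddr, KM_USER0);
 *	}
 */
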
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */

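/*
 * Illustrative usage sketch (not part of the original file): a
 * hypothetical read of one 32-bit word from a physical frame that has
 * no struct page (e.g. a device aperture), which is the case
 * kmap_atomic_pfn() exists for; the i915 GEM code mentioned above is
 * the real in-tree user. kunmap_atomic() works on the returned address
 * because it only inspects the fixmap slot.
 *
 *	static u32 peek_frame_word(unsigned long pfn, unsigned int offset)
 *	{
 *		void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
 *		u32 val = *(u32 *)(vaddr + offset);
 *
 *		kunmap_atomic(vaddr, KM_USER0);
 *		return val;
 *	}
 */
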
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

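/*
 * Illustrative usage sketch (not part of the original file):
 * kmap_atomic_to_page() inverts an atomic mapping by reading back the
 * fixmap pte; it also handles lowmem addresses returned by the
 * !PageHighMem fast path via virt_to_page().
 *
 *	void *vaddr = kmap_atomic(page, KM_USER0);
 *
 *	BUG_ON(kmap_atomic_to_page(vaddr) != page);
 *	kunmap_atomic(vaddr, KM_USER0);
 */
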
EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);

#ifdef CONFIG_NUMA
void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}

	totalram_pages += totalhigh_pages;
}
#else
void __init set_highmem_pages_init(void)
{
	add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

	totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_NUMA */

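/*
 * Illustrative note (an assumption, not part of the original file):
 * both variants above are expected to be called once during boot from
 * the 32-bit x86 mem_init() path, after the kmap fixmap ptes have been
 * set up, to hand the highmem pages to the page allocator and account
 * them in totalram_pages. The !CONFIG_NUMA variant is what the commit
 * subject's "fix !CONFIG_NUMA && CONFIG_HIGHMEM=y" refers to.
 */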