License cleanup: add SPDX GPL-2.0 license identifier to files with no license
[linux-2.6/btrfs-unstable.git] / arch/metag/mm/highmem.c
blob 83527fc7c8a745da38db0187e8aa8157126d272f
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

void *kmap_atomic(struct page *page)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remapping it. Keeping stale mappings
		 * around is a bad idea also, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
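
/*
 * A minimal usage sketch of the pair above, assuming a caller that only
 * needs the mapping for a short, non-sleeping stretch of code: map the
 * page, touch it, unmap it again. The helper name below is hypothetical
 * and purely illustrative; kmap_atomic()/kunmap_atomic() are the real
 * interfaces implemented in this file.
 */
static inline int __maybe_unused example_first_byte(struct page *page)
{
	unsigned char *vaddr = kmap_atomic(page);	/* no sleeping from here on */
	int val = vaddr[0];				/* short, tight access only */

	kunmap_atomic(vaddr);				/* releases the fixmap slot */
	return val;
}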

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

	return (void *)vaddr;
}
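
/*
 * A matching sketch for the pfn-based variant, assuming a physical frame
 * with no struct page behind it (e.g. a firmware-reserved region whose pfn
 * the caller already knows). The helper name and the idea of peeking at
 * the first word are hypothetical and purely illustrative; the mapping is
 * torn down with kunmap_atomic() exactly as for kmap_atomic().
 */
static inline u32 __maybe_unused example_peek_pfn(unsigned long pfn)
{
	u32 *vaddr = kmap_atomic_pfn(pfn);	/* atomic fixmap mapping */
	u32 val = vaddr[0];

	kunmap_atomic(vaddr);			/* clears the pte, flushes TLB */
	return val;
}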

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}