include/asm-sparc/highmem.h

/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/vaddrs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>

/* undef for production */
#define HIGHMEM_DEBUG 1

/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

extern void kmap_init(void) __init;

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily; subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#define LAST_PKMAP 1024

#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt)	((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))
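
/*
 * Worked example (illustrative only, not part of this header, and
 * assuming 4K pages, i.e. PAGE_SHIFT == 12): the pkmap window spans
 * LAST_PKMAP == 1024 pages, a 4MB range starting at PKMAP_BASE, and
 * PKMAP_NR()/PKMAP_ADDR() are inverses over that range.
 */
#if 0
static void pkmap_arith_example(void)
{
	unsigned long vaddr = PKMAP_ADDR(5);	/* PKMAP_BASE + 5 * 4096 */
	unsigned long nr = PKMAP_NR(vaddr);	/* back to 5 */
}
#endif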

extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

static inline void *kmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (page < highmem_start_page)
		return page_address(page);
	return kmap_high(page);
}

static inline void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (page < highmem_start_page)
		return;
	kunmap_high(page);
}
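
/*
 * Usage sketch (illustrative; the helper name is an assumption, not
 * part of this header): a process-context caller maps a page that may
 * live in highmem, touches it through the temporary virtual address,
 * then unmaps it.  kmap() may sleep, hence the in_interrupt() checks
 * above.
 */
#if 0
static void zero_page_example(struct page *page)
{
	void *vaddr = kmap(page);	/* process context only */

	memset(vaddr, 0, PAGE_SIZE);	/* memset needs <linux/string.h> */
	kunmap(page);
}
#endif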

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * give a more generic (and caching) interface.  But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
static inline void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned long idx;
	unsigned long vaddr;

	if (page < highmem_start_page)
		return page_address(page);

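	/*
	 * Each CPU owns KM_TYPE_NR consecutive page-sized slots above
	 * FIX_KMAP_BEGIN; idx selects the slot for this (cpu, type)
	 * pair.
	 */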
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = FIX_KMAP_BEGIN + idx * PAGE_SIZE;

	/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#if HIGHMEM_DEBUG
	if (!pte_none(*(kmap_pte+idx)))
		BUG();
#endif
	set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));
	/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}

static inline void kunmap_atomic(void *kvaddr, enum km_type type)
{
#if HIGHMEM_DEBUG
	unsigned long vaddr = (unsigned long) kvaddr;
	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();

#if 0
	if (vaddr < FIXADDR_START) /* FIXME */
		return;
#endif

	if (vaddr != FIX_KMAP_BEGIN + idx * PAGE_SIZE)
		BUG();

	/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.
	 */
	pte_clear(kmap_pte+idx);
	/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif
#endif
}
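
/*
 * Atomic usage sketch (illustrative; the helper name and the KM_USER0
 * slot choice are assumptions, not part of this header): unlike
 * kmap(), kmap_atomic() may be used from IRQ context, but the mapping
 * is per-cpu, must not be held across anything that can sleep, and
 * must be released with the same km_type slot it was mapped with.
 */
#if 0
static void copy_page_example(struct page *page, char *buf)
{
	char *vaddr = kmap_atomic(page, KM_USER0);

	memcpy(buf, vaddr, PAGE_SIZE);	/* memcpy needs <linux/string.h> */
	kunmap_atomic(vaddr, KM_USER0);
}
#endif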

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */