/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/vaddrs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>
/* undef for production */
#define HIGHMEM_DEBUG 1
/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

extern void kmap_init(void) __init;
/*
 * Right now we initialize only a single pte table. It can be extended
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#define LAST_PKMAP 1024
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
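/*
 * Worked example (editor's sketch, not part of the original header):
 * assuming 4 KB pages (PAGE_SHIFT == 12), pkmap slot 5 lives at
 * PKMAP_ADDR(5) == PKMAP_BASE + 0x5000, and PKMAP_NR(PKMAP_BASE + 0x5000)
 * recovers 5.  LAST_PKMAP_MASK wraps a running slot counter back into
 * the 0..LAST_PKMAP-1 range.
 */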
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
static inline void *kmap(struct page *page)
{
	/* Lowmem pages already have a permanent kernel mapping. */
	if (page < highmem_start_page)
		return page_address(page);
	return kmap_high(page);
}
static inline void kunmap(struct page *page)
{
	if (page < highmem_start_page)
		return;
	kunmap_high(page);
}
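/*
 * Usage sketch (illustrative, not part of the original header): map a
 * possibly-highmem page, touch it through the returned kernel virtual
 * address, then drop the mapping.  kmap() can block waiting for a free
 * pkmap slot, so it must not be used from interrupt context:
 *
 *	char *vaddr = kmap(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 */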
/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
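/*
 * Usage sketch (illustrative, not part of the original header): an
 * atomic kmap is tied to the current CPU and to its km_type slot, so
 * the caller must not sleep between kmap_atomic() and the matching
 * kunmap_atomic() below.  "buf" and "len" are placeholder names:
 *
 *	char *vaddr = kmap_atomic(page, KM_USER0);
 *	memcpy(buf, vaddr, len);
 *	kunmap_atomic(vaddr, KM_USER0);
 */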
static inline void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned long idx;
	unsigned long vaddr;

	/* Lowmem pages are permanently mapped; no fixmap slot needed. */
	if (page < highmem_start_page)
		return page_address(page);

	/* Pick the per-CPU fixmap slot for this kmap type. */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = FIX_KMAP_BEGIN + idx * PAGE_SIZE;

	__flush_cache_one(vaddr);

#if HIGHMEM_DEBUG
	if (!pte_none(*(kmap_pte+idx)))
		BUG();	/* slot is already in use */
#endif
	set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));
	/* XXX Fix - Anton */
	__flush_tlb_one(vaddr);

	return (void*) vaddr;
}
static inline void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr;
	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();

	/* Lowmem addresses were never remapped; nothing to undo. */
	if (vaddr < FIXADDR_START) { // FIXME
		return;
	}

	if (vaddr != FIX_KMAP_BEGIN + idx * PAGE_SIZE)
		BUG();

	/* XXX Fix - Anton */
	__flush_cache_one(vaddr);

#if HIGHMEM_DEBUG
	/*
	 * Force other mappings to Oops if they try to access
	 * this pte without first remapping it.
	 */
	pte_clear(kmap_pte+idx);
	/* XXX Fix - Anton */
	__flush_tlb_one(vaddr);
#endif
}
#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */