/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * PowerPC version, stolen from the i386 version.
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabytes of physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes of physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

/* undef for production */
#define HIGHMEM_DEBUG 1

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#define PKMAP_BASE	CONFIG_HIGHMEM_START
#define LAST_PKMAP	(1 << PTE_SHIFT)
#define LAST_PKMAP_MASK	(LAST_PKMAP - 1)
#define PKMAP_NR(virt)	(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))

#define KMAP_FIX_BEGIN	(PKMAP_BASE + 0x00400000UL)

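/*
 * Layout sketch (illustrative only, not used by the code below):
 * PKMAP_NR() and PKMAP_ADDR() are inverses over the pkmap window,
 * and the fixed kmap_atomic slots start 4MB above PKMAP_BASE.
 * Assuming a 4KB PAGE_SIZE:
 *
 *	unsigned long va = PKMAP_ADDR(5);
 *
 * then va == PKMAP_BASE + 5 * 4096 and PKMAP_NR(va) == 5 both hold.
 */
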
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

static inline void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

static inline void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

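/*
 * Usage sketch (illustrative only; zero_highpage_slow() is a
 * hypothetical helper, not part of this header).  kmap() may sleep,
 * so this pattern is only valid in process context:
 *
 *	static void zero_highpage_slow(struct page *page)
 *	{
 *		void *addr = kmap(page);
 *
 *		memset(addr, 0, PAGE_SIZE);
 *		kunmap(page);
 *	}
 */
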
/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
static inline void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	inc_preempt_count();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = KMAP_FIX_BEGIN + idx * PAGE_SIZE;
#ifdef HIGHMEM_DEBUG
	BUG_ON(!pte_none(*(kmap_pte + idx)));
#endif
	set_pte_at(&init_mm, vaddr, kmap_pte + idx, mk_pte(page, kmap_prot));
	flush_tlb_page(NULL, vaddr);

	return (void *) vaddr;
}

static inline void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef HIGHMEM_DEBUG
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

	if (vaddr < KMAP_FIX_BEGIN) { // FIXME
		dec_preempt_count();
		preempt_check_resched();
		return;
	}

	BUG_ON(vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE);

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte + idx);
	flush_tlb_page(NULL, vaddr);
#endif
	dec_preempt_count();
	preempt_check_resched();
}

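/*
 * Usage sketch (illustrative only; copy_from_highpage_irq() is a
 * hypothetical helper, not part of this header).  Unlike kmap(),
 * kmap_atomic() never sleeps, so the pair may be used from IRQ
 * context, provided the mapping stays short-lived and the km_type
 * slot matches on both calls:
 *
 *	static void copy_from_highpage_irq(void *dst, struct page *page)
 *	{
 *		void *src = kmap_atomic(page, KM_IRQ0);
 *
 *		memcpy(dst, src, PAGE_SIZE);
 *		kunmap_atomic(src, KM_IRQ0);
 *	}
 */
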
static inline struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long) ptr;

	if (vaddr < KMAP_FIX_BEGIN)
		return virt_to_page(ptr);

	idx = (vaddr - KMAP_FIX_BEGIN) >> PAGE_SHIFT;
	return pte_page(kmap_pte[idx]);
}

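/*
 * Usage sketch (illustrative only): kmap_atomic_to_page() recovers the
 * struct page behind an address returned by kmap_atomic(), whether the
 * page was really highmem (mapped at KMAP_FIX_BEGIN or above) or lowmem
 * (in which case kmap_atomic() returned its direct-mapped address):
 *
 *	void *addr = kmap_atomic(page, KM_USER0);
 *
 *	BUG_ON(kmap_atomic_to_page(addr) != page);
 *	kunmap_atomic(addr, KM_USER0);
 */
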
#define flush_cache_kmaps()	flush_cache_all()

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */