/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
18 #ifndef _ASM_HIGHMEM_H
19 #define _ASM_HIGHMEM_H
23 #include <linux/config.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <asm/kmap_types.h>
27 #include <asm/pgtable.h>
29 /* undef for production */
30 #define HIGHMEM_DEBUG 1
32 /* declarations for highmem.c */
33 extern unsigned long highstart_pfn
, highend_pfn
;
35 extern pte_t
*kmap_pte
;
36 extern pgprot_t kmap_prot
;
37 extern pte_t
*pkmap_page_table
;
39 extern void kmap_init(void) __init
;
/*
 * Right now we initialize only a single pte table. It can be extended
 * easily; subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
46 #define PKMAP_BASE (0xfe000000UL)
48 #define LAST_PKMAP 512
50 #define LAST_PKMAP 1024
52 #define LAST_PKMAP_MASK (LAST_PKMAP-1)
53 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
54 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
56 extern void * FASTCALL(kmap_high(struct page
*page
));
57 extern void FASTCALL(kunmap_high(struct page
*page
));
59 static inline void *kmap(struct page
*page
)
63 if (page
< highmem_start_page
)
64 return page_address(page
);
65 return kmap_high(page
);
68 static inline void kunmap(struct page
*page
)
72 if (page
< highmem_start_page
)
/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
83 static inline void *kmap_atomic(struct page
*page
, enum km_type type
)
85 enum fixed_addresses idx
;
88 if (page
< highmem_start_page
)
89 return page_address(page
);
91 idx
= type
+ KM_TYPE_NR
*smp_processor_id();
92 vaddr
= __fix_to_virt(FIX_KMAP_BEGIN
+ idx
);
94 if (!pte_none(*(kmap_pte
-idx
)))
97 set_pte(kmap_pte
-idx
, mk_pte(page
, kmap_prot
));
98 __flush_tlb_one(vaddr
);
100 return (void*) vaddr
;
103 static inline void kunmap_atomic(void *kvaddr
, enum km_type type
)
106 unsigned long vaddr
= (unsigned long) kvaddr
;
107 enum fixed_addresses idx
= type
+ KM_TYPE_NR
*smp_processor_id();
109 if (vaddr
< FIXADDR_START
) // FIXME
112 if (vaddr
!= __fix_to_virt(FIX_KMAP_BEGIN
+idx
))
116 * force other mappings to Oops if they'll try to access
117 * this pte without first remap it
119 pte_clear(kmap_pte
-idx
);
120 __flush_tlb_one(vaddr
);
124 #endif /* __KERNEL__ */
126 #endif /* _ASM_HIGHMEM_H */