nilfs2: insert explanations in gcinode file
[linux-2.6/mini2440.git] / include / asm-mn10300 / highmem.h
blob90f2abb04bfdc5178ab50645852fe04ab812d3da
/* MN10300 Virtual kernel memory mappings for high memory
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * - Derived from include/asm-i386/highmem.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
12 #ifndef _ASM_HIGHMEM_H
13 #define _ASM_HIGHMEM_H
15 #ifdef __KERNEL__
17 #include <linux/init.h>
18 #include <linux/interrupt.h>
19 #include <linux/highmem.h>
20 #include <asm/kmap_types.h>
21 #include <asm/pgtable.h>
23 /* undef for production */
24 #undef HIGHMEM_DEBUG
26 /* declarations for highmem.c */
27 extern unsigned long highstart_pfn, highend_pfn;
29 extern pte_t *kmap_pte;
30 extern pgprot_t kmap_prot;
31 extern pte_t *pkmap_page_table;
33 extern void __init kmap_init(void);
36 * Right now we initialize only a single pte table. It can be extended
37 * easily, subsequent pte tables have to be allocated in one physical
38 * chunk of RAM.
40 #define PKMAP_BASE 0xfe000000UL
41 #define LAST_PKMAP 1024
42 #define LAST_PKMAP_MASK (LAST_PKMAP - 1)
43 #define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
44 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
46 extern unsigned long kmap_high(struct page *page);
47 extern void kunmap_high(struct page *page);
49 static inline unsigned long kmap(struct page *page)
51 if (in_interrupt())
52 BUG();
53 if (page < highmem_start_page)
54 return page_address(page);
55 return kmap_high(page);
58 static inline void kunmap(struct page *page)
60 if (in_interrupt())
61 BUG();
62 if (page < highmem_start_page)
63 return;
64 kunmap_high(page);
68 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
69 * gives a more generic (and caching) interface. But kmap_atomic can
70 * be used in IRQ contexts, so in some (very limited) cases we need
71 * it.
73 static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
75 enum fixed_addresses idx;
76 unsigned long vaddr;
78 if (page < highmem_start_page)
79 return page_address(page);
81 debug_kmap_atomic(type);
82 idx = type + KM_TYPE_NR * smp_processor_id();
83 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
84 #if HIGHMEM_DEBUG
85 if (!pte_none(*(kmap_pte - idx)))
86 BUG();
87 #endif
88 set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
89 __flush_tlb_one(vaddr);
91 return vaddr;
94 static inline void kunmap_atomic(unsigned long vaddr, enum km_type type)
96 #if HIGHMEM_DEBUG
97 enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
99 if (vaddr < FIXADDR_START) /* FIXME */
100 return;
102 if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
103 BUG();
106 * force other mappings to Oops if they'll try to access
107 * this pte without first remap it
109 pte_clear(kmap_pte - idx);
110 __flush_tlb_one(vaddr);
111 #endif
114 #endif /* __KERNEL__ */
116 #endif /* _ASM_HIGHMEM_H */