Introduce page colors. So far, only sparc64 uses correct page color bits. Other archi...
[helenos.git] / kernel / arch / ia32xen / include / mm / page.h
blob1b171145bfe7ba95f04849e83d44f8b15da8ad64
/*
 * Copyright (C) 2006 Martin Decky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup ia32xen_mm
 * @{
 */

/** @file
 */
35 #ifndef KERN_ia32xen_PAGE_H_
36 #define KERN_ia32xen_PAGE_H_
38 #include <arch/mm/frame.h>
40 #define PAGE_WIDTH FRAME_WIDTH
41 #define PAGE_SIZE FRAME_SIZE
43 #define PAGE_COLOR_BITS 0 /* dummy */
45 #ifdef KERNEL
47 #ifndef __ASM__
48 # define KA2PA(x) (((uintptr_t) (x)) - 0x80000000)
49 # define PA2KA(x) (((uintptr_t) (x)) + 0x80000000)
50 #else
51 # define KA2PA(x) ((x) - 0x80000000)
52 # define PA2KA(x) ((x) + 0x80000000)
53 #endif
/*
 * Implementation of generic 4-level page table interface.
 * IA-32 has 2-level page tables, so PTL1 and PTL2 are left out.
 */
/* Entry counts per level; the two unused middle levels collapse. */
#define PTL0_ENTRIES_ARCH	1024
#define PTL1_ENTRIES_ARCH	0
#define PTL2_ENTRIES_ARCH	0
#define PTL3_ENTRIES_ARCH	1024

/*
 * Virtual address bits 31..22 index the page directory (PTL0),
 * bits 21..12 index the page table (PTL3); middle levels are identity.
 */
#define PTL0_INDEX_ARCH(vaddr)	(((vaddr) >> 22) & 0x3ff)
#define PTL1_INDEX_ARCH(vaddr)	0
#define PTL2_INDEX_ARCH(vaddr)	0
#define PTL3_INDEX_ARCH(vaddr)	(((vaddr) >> 12) & 0x3ff)

/*
 * Table entries hold machine frame numbers (shifted out of bits 31..12);
 * MA2PA translates the machine address back to a physical address.
 */
#define GET_PTL1_ADDRESS_ARCH(ptl0, i)	((pte_t *) MA2PA((((pte_t *) (ptl0))[(i)].frame_address) << 12))
#define GET_PTL2_ADDRESS_ARCH(ptl1, i)	(ptl1)
#define GET_PTL3_ADDRESS_ARCH(ptl2, i)	(ptl2)
#define GET_FRAME_ADDRESS_ARCH(ptl3, i)	((uintptr_t) MA2PA((((pte_t *) (ptl3))[(i)].frame_address) << 12))
/** Install a new page directory (PTL0) base.
 *
 * A Xen paravirtualized guest may not load CR3 directly; instead it
 * issues an MMUEXT_NEW_BASEPTR operation carrying the machine frame
 * number of the new directory. Wrapped in do/while(0) so the macro
 * behaves as a single statement (safe in if/else bodies).
 */
#define SET_PTL0_ADDRESS_ARCH(ptl0) \
	do { \
		mmuext_op_t mmu_ext; \
		\
		mmu_ext.cmd = MMUEXT_NEW_BASEPTR; \
		mmu_ext.mfn = ADDR2PFN(PA2MA(ptl0)); \
		xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF); \
	} while (0)
/** Point page-directory entry (ptl0)[i] at page table 'a'.
 *
 * Page tables are read-only to a Xen guest; the write is performed by
 * the hypervisor via MMU_UPDATE. 0x0003 sets the Present and Writable
 * bits in the new entry. Wrapped in do/while(0) so the macro behaves
 * as a single statement.
 */
#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) \
	do { \
		mmu_update_t update; \
		\
		update.ptr = PA2MA(KA2PA(&((pte_t *) (ptl0))[(i)])); \
		update.val = PA2MA(a) | 0x0003; \
		xen_mmu_update(&update, 1, NULL, DOMID_SELF); \
	} while (0)
/* Middle levels do not exist on IA-32: nothing to set. */
#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a)
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a)
/* Frame addresses are stored as machine frame numbers (PA2MA first). */
#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a)	(((pte_t *) (ptl3))[(i)].frame_address = PA2MA(a) >> 12)

#define GET_PTL1_FLAGS_ARCH(ptl0, i)	get_pt_flags((pte_t *) (ptl0), (index_t)(i))
#define GET_PTL2_FLAGS_ARCH(ptl1, i)	PAGE_PRESENT
#define GET_PTL3_FLAGS_ARCH(ptl2, i)	PAGE_PRESENT
#define GET_FRAME_FLAGS_ARCH(ptl3, i)	get_pt_flags((pte_t *) (ptl3), (index_t)(i))

#define SET_PTL1_FLAGS_ARCH(ptl0, i, x)	set_pt_flags((pte_t *) (ptl0), (index_t)(i), (x))
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x)
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x)
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x)	set_pt_flags((pte_t *) (ptl3), (index_t)(i), (x))

/* An entry is "valid" if any bit is set (including soft_valid), while
 * "present" reflects only the hardware present bit. */
#define PTE_VALID_ARCH(p)	(*((uint32_t *) (p)) != 0)
#define PTE_PRESENT_ARCH(p)	((p)->present != 0)
#define PTE_GET_FRAME_ARCH(p)	((p)->frame_address << FRAME_WIDTH)
#define PTE_WRITABLE_ARCH(p)	((p)->writeable != 0)
/* Legacy IA-32 paging cannot forbid execution; always executable. */
#define PTE_EXECUTABLE_ARCH(p)	1
109 #ifndef __ASM__
111 #include <mm/page.h>
112 #include <arch/types.h>
113 #include <arch/mm/frame.h>
114 #include <typedefs.h>
115 #include <arch/hypercall.h>
117 /* Page fault error codes. */
119 /** When bit on this position is 0, the page fault was caused by a not-present page. */
120 #define PFERR_CODE_P (1 << 0)
122 /** When bit on this position is 1, the page fault was caused by a write. */
123 #define PFERR_CODE_RW (1 << 1)
125 /** When bit on this position is 1, the page fault was caused in user mode. */
126 #define PFERR_CODE_US (1 << 2)
128 /** When bit on this position is 1, a reserved bit was set in page directory. */
129 #define PFERR_CODE_RSVD (1 << 3)
/** Page Table Entry.
 *
 * Bit layout of a 32-bit IA-32 page directory/table entry.
 */
struct page_specifier {
	unsigned present : 1;			/**< Hardware present bit. */
	unsigned writeable : 1;			/**< Writes allowed. */
	unsigned uaccessible : 1;		/**< Accessible from user mode. */
	unsigned page_write_through : 1;	/**< Write-through caching. */
	unsigned page_cache_disable : 1;	/**< Caching disabled (active high). */
	unsigned accessed : 1;			/**< Set by hardware on access. */
	unsigned dirty : 1;			/**< Set by hardware on write. */
	unsigned pat : 1;			/**< Page attribute table bit. */
	unsigned global : 1;			/**< Survives CR3 reloads. */
	unsigned soft_valid : 1;		/**< Valid content even if the present bit is not set. */
	unsigned avl : 2;			/**< Available for software use. */
	unsigned frame_address : 20;		/**< Machine frame number (bits 31..12). */
} __attribute__ ((packed));
147 typedef struct {
148 uint64_t ptr; /**< Machine address of PTE */
149 union { /**< New contents of PTE */
150 uint64_t val;
151 pte_t pte;
153 } mmu_update_t;
/** Argument structure for the MMUEXT_OP hypercall. */
typedef struct {
	unsigned int cmd;	/**< MMUEXT_* operation code. */
	union {
		unsigned long mfn;		/**< Machine frame number (e.g. for MMUEXT_NEW_BASEPTR). */
		unsigned long linear_addr;	/**< Linear address operand (command-dependent). */
	};
	union {
		unsigned int nr_ents;	/**< Entry count (command-dependent). */
		void *vcpumask;		/**< VCPU bitmap (command-dependent). */
	};
} mmuext_op_t;
167 static inline int xen_update_va_mapping(const void *va, const pte_t pte, const unsigned int flags)
169 return hypercall4(XEN_UPDATE_VA_MAPPING, va, pte, 0, flags);
172 static inline int xen_mmu_update(const mmu_update_t *req, const unsigned int count, unsigned int *success_count, domid_t domid)
174 return hypercall4(XEN_MMU_UPDATE, req, count, success_count, domid);
177 static inline int xen_mmuext_op(const mmuext_op_t *op, const unsigned int count, unsigned int *success_count, domid_t domid)
179 return hypercall4(XEN_MMUEXT_OP, op, count, success_count, domid);
182 static inline int get_pt_flags(pte_t *pt, index_t i)
184 pte_t *p = &pt[i];
186 return (
187 (!p->page_cache_disable)<<PAGE_CACHEABLE_SHIFT |
188 (!p->present)<<PAGE_PRESENT_SHIFT |
189 p->uaccessible<<PAGE_USER_SHIFT |
190 1<<PAGE_READ_SHIFT |
191 p->writeable<<PAGE_WRITE_SHIFT |
192 1<<PAGE_EXEC_SHIFT |
193 p->global<<PAGE_GLOBAL_SHIFT
197 static inline void set_pt_flags(pte_t *pt, index_t i, int flags)
199 pte_t *p = &pt[i];
201 p->page_cache_disable = !(flags & PAGE_CACHEABLE);
202 p->present = !(flags & PAGE_NOT_PRESENT);
203 p->uaccessible = (flags & PAGE_USER) != 0;
204 p->writeable = (flags & PAGE_WRITE) != 0;
205 p->global = (flags & PAGE_GLOBAL) != 0;
208 * Ensure that there is at least one bit set even if the present bit is cleared.
210 p->soft_valid = true;
213 extern void page_arch_init(void);
214 extern void page_fault(int n, istate_t *istate);
216 #endif /* __ASM__ */
218 #endif /* KERNEL */
220 #endif
/** @}
 */