amd64 port: mainly on the pmap headers, identify_cpu and initcpu
dragonfly/port-amd64.git: sys/cpu/amd64/include/pmap.h
/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/cpu/amd64/include/pmap.h,v 1.2 2007/09/23 04:29:30 yanyh Exp $
 */
#ifndef _CPU_PMAP_H_
#define _CPU_PMAP_H_
/*
 * A four level page table is implemented by the amd64 hardware.  Each
 * page table represents 9 address bits and eats 4KB of space.  There are
 * 512 8-byte entries in each table.  The last page table contains PTE's
 * representing 4K pages (12 bits of address space).
 *
 * The page tables are named:
 *	PML4	Represents 512GB per entry (256TB total)	LEVEL4
 *	PDP	Represents 1GB per entry			LEVEL3
 *	PDE	Represents 2MB per entry			LEVEL2
 *	PTE	Represents 4KB per entry			LEVEL1
 *
 *	PG_PAE	PAE 2MB extension.  In the PDE.  If 0 there is another level
 *		of page table and PG_D and PG_G are ignored.  If 1 this is
 *		the terminating page table and PG_D and PG_G apply.
 *
 *	PG_PWT	Page write through.  If 1 the data mapped by the page is
 *		cached write-through instead of write-back.
 *	PG_PCD	Page Cache Disable.  If 1 the data mapped by the page is
 *		not cached.
 *
 * Each entry in the PML4 table represents a 512GB VA space.  We use a fixed
 * PML4 and adjust entries within it to switch user spaces.
 */
#define PG_V		0x0001LL	/* P	Present		*/
#define PG_RW		0x0002LL	/* R/W	Writable	*/
#define PG_U		0x0004LL	/* U/S	User		*/
#define PG_PWT		0x0008LL	/* PWT	Page Write Through */
#define PG_PCD		0x0010LL	/* PCD	Page Cache Disable */
#define PG_A		0x0020LL	/* A	Accessed	*/
#define PG_D		0x0040LL	/* D	Dirty	(pte only) */
#define PG_PAT		0x0080LL	/* PAT		(pte only) */
#define PG_G		0x0100LL	/* G	Global	(pte only) */
#define PG_AVL0		0x0200LL	/* available to os */
#define PG_AVL1		0x0400LL	/* available to os */
#define PG_AVL2		0x0800LL	/* available to os */

#define PG_PTE_PAT	PG_PAT		/* PAT bit for 4K pages */
#define PG_PDE_PAT	0x1000LL	/* PAT bit for 2M pages */
#define PG_PDPE_PAT	0x1000LL	/* PAT bit for 1G pages */
#define PG_FRAME	0x000000FFFFFFF000LL	/* 40 bit phys address */
#define PG_PHYSRESERVED	0x000FFF0000000000LL	/* reserved for future PA */
#define PG_AVLN		0x0010000000000000LL	/* available to os, bit 52-62 */
#define PG_NX		(1ul << 63)		/* No-execute, bit 63 */
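
/*
 * Illustrative note (added for clarity, not part of the original header):
 * a leaf PTE is normally built by OR'ing a page-aligned physical address
 * (masked with PG_FRAME) together with the control bits above, roughly:
 *
 *	pte = (pa & PG_FRAME) | PG_V | PG_RW | PG_G;		(kernel data)
 *	pte = (pa & PG_FRAME) | PG_V | PG_RW | PG_U | PG_NX;	(user data)
 */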

/*
 * OS assignments
 */
#define PG_W		PG_AVL0		/* Wired */
#define PG_MANAGED	PG_AVL1		/* Managed */
#define PG_PROT		(PG_RW|PG_U)	/* all protection bits */
#define PG_N		(PG_PWT|PG_PCD)	/* Non-cacheable */

/*
 * Page Protection Exception bits
 */
#define PGEX_P		0x01	/* Protection violation vs. not present */
#define PGEX_W		0x02	/* during a Write cycle */
#define PGEX_U		0x04	/* access from User mode (UPL) */

#define PGEX_MAILBOX	0x40
#define PGEX_FPFAULT	0x80

/*
 * Pte related macros.  This is complicated by having to deal with
 * the sign extension of the 48th bit.
 */
#define KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
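
/*
 * Illustrative example (added for clarity, not part of the original header):
 * the ((unsigned long)-1 << 47) term supplies the sign extension for kernel
 * addresses, so for instance
 *
 *	KVADDR(KPML4I, KPDPI, 0, 0)
 *	    = 0xFFFF800000000000 | (511UL << 39) | (511UL << 30)
 *	    = 0xFFFFFFFFC0000000
 *
 * which is a canonical address in the topmost 1GB of the kernel map, while
 * UVADDR(0, 0, 0, 1) is simply 0x0000000000001000.
 */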

/*
 * The six fields in a long mode virtual address:
 *
 * |63    48|47      39|38     30|29    21|20    12|11       0|
 * |--------|----------|---------|--------|--------|----------|
 * |  Ext   |   PML4   |   PDP   |   PD   |   PT   |  offset  |
 * |--------|----------|---------|--------|--------|----------|
 *
 * Each of the PML4, PDP, PD and PT tables holds 512 entries.  Each entry
 * is 64 bits (8 bytes), so a 4KB page holds exactly 512 of them.  Each
 * PML4 entry covers 512GB (2**39) of VA space; each PDP entry covers 1GB
 * (2**30); each PD entry covers 2MB (2**21); and each PT entry covers
 * 4KB (2**12), the page size.
 *
 * The 512 PML4 entries are allocated this way:
 *	User space: one PML4 entry (512GB);
 *	Kernel space: one PML4 entry;
 *	Each cpu in the system: one PML4 entry reserved for private use;
 *	One PML4 entry used to directly map all of physical memory.
 */
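
/*
 * Worked example (added for clarity, not part of the original header):
 * the kernel base address 0xFFFFFFFF80000000 decomposes as
 *
 *	PML4 index = (va >> 39) & 0x1FF = 511
 *	PDP  index = (va >> 30) & 0x1FF = 510
 *	PD   index = (va >> 21) & 0x1FF = 0
 *	PT   index = (va >> 12) & 0x1FF = 0
 *	offset     =  va        & 0xFFF = 0
 *
 * i.e. the last PML4 slot and the second-to-last PDP slot, with bits
 * 48-63 being the sign extension of bit 47.
 */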

#define NKPML4E		1		/* number of PML4 entries for kernel map */
#define NKPDPE		1		/* number of PDP entries for kernel map */
#define NKPDE		(NKPDPE*NPDEPG)	/* number of PD entries for kernel map */

#define NUPML4E		1		/* number of PML4 entries for user map */
#define NUPDPE		(NUPML4E*NPDPEPG)
#define NUPDE		(NUPDPE*NPDEPG)

#define NDMPML4E	1		/* number of PML4 entries for direct map */

/*
 * The *PTDI values control the layout of virtual memory
 */
#define KPML4I		(NPML4EPG-1)	/* pml4 entry for kernel map */
#define KPDPI		(NPDPEPG-1)	/* pdp entry for kernel map */

#define UPML4I		(NPML4EPG-2)	/* pml4 entry for user map */

#define DMPML4I		(NPML4EPG-3)	/* pml4 entry for direct map */

/*
 * Per-cpu PML4 entries start at index 256 and run to the end (511),
 * giving 256 entries in all, i.e. support for up to 256 CPUs.
 */
#define MPPML4I		(NPML4EPG/2)	/* first per-cpu pml4 entry */
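
/*
 * Worked values (added for clarity, not part of the original header):
 * with NPML4EPG == 512 the assignments above give
 *
 *	KPML4I  = 511	(kernel map, topmost 512GB slot)
 *	UPML4I  = 510	(user map)
 *	DMPML4I = 509	(direct map of physical memory)
 *	MPPML4I = 256	(base of the per-cpu range described above)
 */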

/*
 * XXX doesn't really belong here I guess...
 */
#define ISA_HOLE_START		0xa0000
#define ISA_HOLE_LENGTH		(0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */
#ifdef _KERNEL
extern pt_entry_t PTmap[], APTmap[], Upte;
extern pd_entry_t PTD[], APTD[], PTDpde, APTDpde, Upde;

extern pd_entry_t IdlePTD;	/* physical address of "Idle" state directory */
#endif

#ifdef _KERNEL
/*
 * virtual address to page table entry and
 * to physical address.  Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
#define vtopte(va)	(PTmap + i386_btop(va))

#define avtopte(va)	(APTmap + i386_btop(va))

/*
 * Routine: pmap_kextract
 * Function:
 *	Extract the physical page address associated with the
 *	kernel virtual address.
 */
static __inline vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	vm_paddr_t pa;

	if ((pa = (vm_offset_t) PTD[va >> PDRSHIFT]) & PG_PAT) {
		pa = (pa & ~(NBPDR - 1)) | (va & (NBPDR - 1));
	} else {
		pa = *(vm_offset_t *)vtopte(va);
		pa = (pa & PG_FRAME) | (va & PAGE_MASK);
	}
	return pa;
}

/*
 * XXX
 */
#define vtophys(va)	pmap_kextract(((vm_offset_t)(va)))
#define vtophys_pte(va)	((pt_entry_t)pmap_kextract(((vm_offset_t)(va))))

#define avtophys(va)	(((vm_offset_t)(*avtopte(va)) & PG_FRAME) | ((vm_offset_t)(va) & PAGE_MASK))
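
/*
 * Illustrative sketch (not part of the original header): how kernel code
 * might translate the address of a kernel buffer into a physical address,
 * e.g. for a DMA descriptor.  "example_buf" is a hypothetical name.
 *
 *	static char example_buf[PAGE_SIZE];
 *	...
 *	vm_paddr_t pa = vtophys(example_buf);
 *
 * vtophys() simply wraps pmap_kextract(), which consults PTD[]/PTmap[]
 * (the recursive page table mapping) for the kernel virtual address.
 */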

#endif

/*
 * Pmap stuff
 */
struct pv_entry;

struct md_page {
	int pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};

/*
 * Each machine dependent implementation is expected to
 * keep certain statistics.  They may do this any way they
 * so choose, but are expected to return the statistics
 * in the following structure.
 */
struct pmap_statistics {
	long resident_count;	/* # of pages mapped (total) */
	long wired_count;	/* # of pages wired */
};
typedef struct pmap_statistics *pmap_statistics_t;

struct vm_object;
struct vm_page;

struct pmap {
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	struct vm_object	*pm_pteobj;	/* Container for pte's */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
	int			pm_count;	/* reference count */
	cpumask_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct vm_page		*pm_ptphint;	/* pmap ptp hint */
};

#define pmap_resident_count(pmap) (pmap)->pm_stats.resident_count

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap;
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	TAILQ_ENTRY(pv_entry)	pv_plist;
	struct vm_page	*pv_ptem;	/* VM page for pte */
} *pv_entry_t;
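
/*
 * Illustrative note (added for clarity, not part of the original header):
 * each pv_entry is threaded onto two lists at once, so entering a mapping
 * does something along these lines:
 *
 *	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);  (per-pmap list)
 *	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);     (per-page list)
 *
 * That way a page can be unmapped from every pmap via md_page.pv_list,
 * and a pmap can be torn down by walking pm_pvlist.
 */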

#ifdef _KERNEL

#define NPPROVMTRR		8
#define PPRO_VMTRRphysBase0	0x200
#define PPRO_VMTRRphysMask0	0x201
struct ppro_vmtrr {
	u_int64_t base, mask;
};
extern struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t avail_end;
extern vm_paddr_t avail_start;
extern vm_offset_t clean_eva;
extern vm_offset_t clean_sva;
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;

void	pmap_bootstrap (vm_paddr_t, vm_paddr_t);
pmap_t	pmap_kernel (void);
void	*pmap_mapdev (vm_paddr_t, vm_size_t);
void	pmap_unmapdev (vm_offset_t, vm_size_t);
unsigned *pmap_pte (pmap_t, vm_offset_t) __pure2;
struct vm_page *pmap_use_pt (pmap_t, vm_offset_t);
#ifdef SMP
void	pmap_set_opt (void);
#endif

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_CPU_PMAP_H_ */