/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2014 by Delphix. All rights reserved.
 * Copyright 2018 Joyent, Inc.
 */
#ifndef	_VM_HTABLE_H
#define	_VM_HTABLE_H

#ifdef	__cplusplus
extern "C" {
#endif

#if defined(__GNUC__) && defined(_ASM_INLINES) && defined(_KERNEL)
#include <asm/htable.h>
#endif

extern void atomic_andb(uint8_t *addr, uint8_t value);
extern void atomic_orb(uint8_t *addr, uint8_t value);
extern void atomic_inc16(uint16_t *addr);
extern void atomic_dec16(uint16_t *addr);
/*
 * Each hardware page table has an htable_t describing it.
 *
 * We use a reference counter mechanism to detect when we can free an htable.
 * In the implementation the reference count is split into 2 separate counters:
 *
 *	ht_busy is a traditional reference count of uses of the htable pointer
 *
 *	ht_valid_cnt is a count of how many references are implied by valid
 *	PTE/PTP entries in the pagetable
 *
 * ht_busy is only incremented by htable_lookup() or htable_create()
 * while holding the appropriate hash_table mutex. While installing a new
 * valid PTE or PTP, in order to increment ht_valid_cnt a thread must have
 * done an htable_lookup() or htable_create() but not yet called
 * htable_release().
 *
 * htable_release(), while holding the mutex, can know that if
 * busy == 1 and valid_cnt == 0, the htable can be freed.
 *
 * The fields have been ordered to make htable_lookup() fast. Hence,
 * ht_hat, ht_vaddr, ht_level and ht_next need to be clustered together.
 */
struct htable {
	struct htable	*ht_next;	/* forward link for hash table */
	struct hat	*ht_hat;	/* hat this mapping comes from */
	uintptr_t	ht_vaddr;	/* virt addr at start of this table */
	int8_t		ht_level;	/* page table level: 0=4K, 1=2M, ... */
	uint8_t		ht_flags;	/* see below */
	int16_t		ht_busy;	/* implements locking protocol */
	int16_t		ht_valid_cnt;	/* # of valid entries in this table */
	uint32_t	ht_lock_cnt;	/* # of locked entries in this table */
					/* never used for kernel hat */
	pfn_t		ht_pfn;		/* pfn of page of the pagetable */
	struct htable	*ht_prev;	/* backward link for hash table */
	struct htable	*ht_parent;	/* htable that points to this htable */
	struct htable	*ht_shares;	/* for HTABLE_SHARED_PFN only */
};
typedef struct htable htable_t;
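
/*
 * An illustrative sketch (not the actual implementation) of how the two
 * counters described above combine at release time; unlink_and_free() is
 * a hypothetical helper:
 *
 *	mutex_enter(mtx);			the hash_table mutex
 *	if (ht->ht_busy == 1 && ht->ht_valid_cnt == 0)
 *		unlink_and_free(ht);		nothing references it now
 *	else
 *		ht->ht_busy--;
 *	mutex_exit(mtx);
 */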
/*
 * Flag values for the htable ht_flags field:
 *
 * HTABLE_COPIED - This is the top level htable of a HAT being used with
 *	per-CPU pagetables.
 *
 * HTABLE_SHARED_PFN - this htable had its PFN assigned from sharing another
 *	htable. Used by hat_share() for ISM.
 */
#define	HTABLE_COPIED		(0x01)
#define	HTABLE_SHARED_PFN	(0x02)
/*
 * The htable hash table hashing function.  The 28 is so that high
 * order bits are included in the hash index to skew the wrap
 * around of addresses.  Even though the hash buckets are stored per
 * hat we include the value of the hat pointer in the hash function so
 * that the secondary hash for the htable mutex winds up being different in
 * every address space.
 */
#define	HTABLE_HASH(hat, va, lvl)					\
	((((va) >> LEVEL_SHIFT(1)) + ((va) >> 28) + (lvl) +		\
	((uintptr_t)(hat) >> 4)) & ((hat)->hat_num_hash - 1))
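
/*
 * A worked example with assumed values (illustration only): take
 * LEVEL_SHIFT(1) == 21, hat_num_hash == 256, lvl == 0,
 * hat == 0xffffff0123456780 and va == 0xfffffe0000200000. Then
 * (va >> 21) == 0x7fffff00001, (va >> 28) == 0xfffffe000 and
 * ((uintptr_t)hat >> 4) == 0xffffff012345678; the low byte of the sum
 * is 0x79, so the mask (hat_num_hash - 1) == 0xff selects bucket 0x79.
 */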
/*
 * Each CPU gets a unique hat_cpu_info structure in cpu_hat_info. For more
 * information on its use and members, see uts/i86pc/vm/hat_i86.c.
 */
struct hat_cpu_info {
	kmutex_t hci_mutex;		/* mutex to ensure sequential usage */
#if defined(__amd64)
	pfn_t	hci_pcp_l3pfn;		/* pfn of hci_pcp_l3ptes */
	pfn_t	hci_pcp_l2pfn;		/* pfn of hci_pcp_l2ptes */
	x86pte_t *hci_pcp_l3ptes;	/* PCP Level==3 pagetable (top) */
	x86pte_t *hci_pcp_l2ptes;	/* PCP Level==2 pagetable */
	struct hat *hci_user_hat;	/* CPU specific HAT */
	pfn_t	hci_user_l3pfn;		/* pfn of hci_user_l3ptes */
	x86pte_t *hci_user_l3ptes;	/* PCP User L3 pagetable */
#endif	/* __amd64 */
};
/*
 * Compute the last page aligned VA mapped by an htable.
 *
 * Given a va and a level, compute the virtual address of the start of the
 * next page at that level.
 *
 * XX64 - The check for the VA hole needs to be better generalized.
 */
#if defined(__amd64)

#define	HTABLE_NUM_PTES(ht)	(((ht)->ht_flags & HTABLE_COPIED) ?	\
	(((ht)->ht_level == mmu.max_level) ? 512 : 4) : 512)

#define	HTABLE_LAST_PAGE(ht)						\
	((ht)->ht_level == mmu.max_level ? ((uintptr_t)0UL - MMU_PAGESIZE) :\
	((ht)->ht_vaddr - MMU_PAGESIZE +				\
	((uintptr_t)HTABLE_NUM_PTES(ht) << LEVEL_SHIFT((ht)->ht_level))))

#define	NEXT_ENTRY_VA(va, l)						\
	((va & LEVEL_MASK(l)) + LEVEL_SIZE(l) == mmu.hole_start ?	\
	mmu.hole_end : (va & LEVEL_MASK(l)) + LEVEL_SIZE(l))

#elif defined(__i386)

#define	HTABLE_NUM_PTES(ht)						\
	(!mmu.pae_hat ? 1024 : ((ht)->ht_level == 2 ? 4 : 512))

#define	HTABLE_LAST_PAGE(ht)	((ht)->ht_vaddr - MMU_PAGESIZE +	\
	((uintptr_t)HTABLE_NUM_PTES(ht) << LEVEL_SHIFT((ht)->ht_level)))

#define	NEXT_ENTRY_VA(va, l)	((va & LEVEL_MASK(l)) + LEVEL_SIZE(l))

#endif
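
/*
 * A worked example (amd64, values assumed for illustration): a level 0
 * htable with ht_vaddr == 0x400000 and no HTABLE_COPIED flag has
 * HTABLE_NUM_PTES() == 512 entries of LEVEL_SIZE(0) == 0x1000 bytes each,
 * so HTABLE_LAST_PAGE() == 0x400000 - 0x1000 + (512 << 12) == 0x5ff000,
 * and NEXT_ENTRY_VA(0x400000, 0) == 0x401000 (assuming no VA hole there).
 */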
#if defined(_KERNEL)

/*
 * Initialization function called from hat_init().
 */
extern void htable_init(void);
/*
 * Functions to lookup, or "lookup and create", the htable corresponding
 * to the virtual address "vaddr" in the "hat" at the given "level" of
 * page tables. htable_lookup() may return NULL if no such entry exists.
 *
 * On return the given htable is marked busy (a shared lock) - this prevents
 * the htable from being stolen or freed until htable_release() is called.
 *
 * If kalloc_flag is set on an htable_create() we can't call kmem allocation
 * routines for this htable, since it's for the kernel hat itself.
 *
 * htable_acquire() is used when an htable pointer has been extracted from
 * an hment and we need to get a reference to the htable.
 */
extern htable_t *htable_lookup(struct hat *hat, uintptr_t vaddr, level_t level);
extern htable_t *htable_create(struct hat *hat, uintptr_t vaddr, level_t level,
	htable_t *shared);
extern void htable_acquire(htable_t *);

extern void htable_release(htable_t *ht);
extern void htable_destroy(htable_t *ht);
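
/*
 * A typical lookup pattern (an illustrative sketch, not taken from the
 * kernel sources):
 *
 *	ht = htable_lookup(hat, vaddr, 0);
 *	if (ht != NULL) {
 *		... examine or update entries via the x86pte routines ...
 *		htable_release(ht);
 *	}
 */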
/*
 * Code to free all remaining htables for a hat. Called after the hat is no
 * longer in use by any thread.
 */
extern void htable_purge_hat(struct hat *hat);

/*
 * Find the htable, page table entry index, and PTE of the given virtual
 * address. If not found returns NULL. When found, returns the htable_t *,
 * sets entry, and has a hold on the htable.
 */
extern htable_t *htable_getpte(struct hat *, uintptr_t, uint_t *, x86pte_t *,
	level_t);

/*
 * Similar to htable_getpte(), except that this only succeeds if a valid
 * page mapping is present.
 */
extern htable_t *htable_getpage(struct hat *hat, uintptr_t va, uint_t *entry);

/*
 * Called to allocate initial/additional htables for reserve.
 */
extern void htable_initial_reserve(uint_t);
extern void htable_reserve(uint_t);

/*
 * Used to readjust the htable reserve after the reserve list has been used.
 * Also called after boot to release left over boot reserves.
 */
extern void htable_adjust_reserve(void);

/*
 * Return the number of bytes mapped by all the htables in a given hat.
 */
extern size_t htable_mapped(struct hat *);

/*
 * Attach initial pagetables as htables.
 */
extern void htable_attach(struct hat *, uintptr_t, level_t, struct htable *,
	pfn_t);
/*
 * Routine to find the next populated htable at or above a given virtual
 * address. Can specify an upper limit, or HTABLE_WALK_TO_END to indicate
 * that it should search the entire address space. Similar to
 * htable_getpte(), but used for walking through address ranges. It can be
 * used like this:
 *
 *	va = ...
 *	ht = NULL;
 *	while (va < end_va) {
 *		pte = htable_walk(hat, &ht, &va, end_va);
 *		if (!pte)
 *			break;
 *
 *		... code to operate on page at va ...
 *
 *		va += LEVEL_SIZE(ht->ht_level);
 *	}
 *	if (ht)
 *		htable_release(ht);
 */
extern x86pte_t htable_walk(struct hat *hat, htable_t **ht, uintptr_t *va,
	uintptr_t eaddr);

#define	HTABLE_WALK_TO_END ((uintptr_t)-1)
/*
 * Utilities to convert between virtual addresses and page table entry
 * indices.
 */
extern uint_t htable_va2entry(uintptr_t va, htable_t *ht);
extern uintptr_t htable_e2va(htable_t *ht, uint_t entry);
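
/*
 * Conceptually (an illustrative identity, not the implementation) the
 * entry index is just the VA bits that select a slot at this level:
 *
 *	entry == (va >> LEVEL_SHIFT(ht->ht_level)) &
 *	    (HTABLE_NUM_PTES(ht) - 1)
 */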
/*
 * Interfaces that provide access to page table entries via the htable.
 *
 * Note that all accesses except x86pte_copy() and x86pte_zero() are atomic.
 */
extern void	x86pte_cpu_init(cpu_t *);
extern void	x86pte_cpu_fini(cpu_t *);

extern x86pte_t	x86pte_get(htable_t *, uint_t entry);

/*
 * x86pte_set returns LPAGE_ERROR if it's asked to overwrite a page table
 * link with a large page mapping.
 */
#define	LPAGE_ERROR (-(x86pte_t)1)
extern x86pte_t	x86pte_set(htable_t *, uint_t entry, x86pte_t new, void *);

extern x86pte_t	x86pte_inval(htable_t *ht, uint_t entry,
	x86pte_t old, x86pte_t *ptr, boolean_t tlb);

extern x86pte_t	x86pte_update(htable_t *ht, uint_t entry,
	x86pte_t old, x86pte_t new);

extern void	x86pte_copy(htable_t *src, htable_t *dest, uint_t entry,
	uint_t cnt);
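
/*
 * x86pte_update() acts like a compare-and-swap on the entry, returning
 * the value found, so a caller can retry after losing a race. A hedged
 * sketch; clear_soft_bits() is a hypothetical transform:
 *
 *	do {
 *		old = x86pte_get(ht, entry);
 *		new = clear_soft_bits(old);
 *	} while (x86pte_update(ht, entry, old, new) != old);
 */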
/*
 * Access to a pagetable knowing only the pfn.
 */
extern x86pte_t *x86pte_mapin(pfn_t, uint_t, htable_t *);
extern void x86pte_mapout(void);

/*
 * These are actually inlines for "lock; incw", "lock; decw", etc.
 * instructions.
 */
#define	HTABLE_INC(x)		atomic_inc16((uint16_t *)&x)
#define	HTABLE_DEC(x)		atomic_dec16((uint16_t *)&x)
#define	HTABLE_LOCK_INC(ht)	atomic_inc_32(&(ht)->ht_lock_cnt)
#define	HTABLE_LOCK_DEC(ht)	atomic_dec_32(&(ht)->ht_lock_cnt)
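
/*
 * For example (illustrative), the busy count on an htable is bumped
 * atomically with:
 *
 *	HTABLE_INC(ht->ht_busy);
 */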
#ifdef __xpv
extern void xen_flush_va(caddr_t va);
extern void xen_gflush_va(caddr_t va, cpuset_t);
extern void xen_flush_tlb(void);
extern void xen_gflush_tlb(cpuset_t);
extern void xen_pin(pfn_t, level_t);
extern void xen_unpin(pfn_t);
extern int xen_kpm_page(pfn_t, uint_t);

/*
 * The hypervisor maps all page tables into our address space read-only.
 * Under normal circumstances, the hypervisor then handles all updates to
 * the page tables underneath the covers for us. However, when we are
 * trying to dump core after a hypervisor panic, the hypervisor is no
 * longer available to do these updates. To work around the protection
 * problem, we simply disable write-protect checking for the duration of a
 * pagetable update operation.
 */
#define	XPV_ALLOW_PAGETABLE_UPDATES()					\
	{								\
		if (IN_XPV_PANIC())					\
			setcr0((getcr0() & ~CR0_WP) & 0xffffffff);	\
	}
#define	XPV_DISALLOW_PAGETABLE_UPDATES()				\
	{								\
		if (IN_XPV_PANIC())					\
			setcr0((getcr0() | CR0_WP) & 0xffffffff);	\
	}

#else /* __xpv */

#define	XPV_ALLOW_PAGETABLE_UPDATES()
#define	XPV_DISALLOW_PAGETABLE_UPDATES()

#endif
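
/*
 * Illustrative usage (a sketch, not taken from the kernel sources):
 * bracketing a direct pagetable store so it still works while dumping
 * core after a hypervisor panic:
 *
 *	XPV_ALLOW_PAGETABLE_UPDATES();
 *	*ptep = new_pte;		otherwise a read-only mapping
 *	XPV_DISALLOW_PAGETABLE_UPDATES();
 */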
#endif	/* _KERNEL */

#ifdef __cplusplus
}
#endif

#endif	/* _VM_HTABLE_H */