/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */
#ifndef	_VM_HAT_H
#define	_VM_HAT_H

#include <sys/types.h>
#include <sys/t_lock.h>
#include <vm/faultcode.h>
#include <sys/kstat.h>
#include <sys/siginfo.h>

#ifdef	__cplusplus
extern "C" {
#endif
/*
 * VM - Hardware Address Translation management.
 *
 * This file describes the machine independent interfaces to
 * the hardware address translation management routines.  Other
 * machine specific interfaces and structures are defined
 * in <vm/hat_xxx.h>.  The hat layer manages the address
 * translation hardware as a cache driven by calls from the
 * higher levels of the VM system.
 */
struct hat;
struct kpme;
struct memseg;

#include <vm/page.h>
/*
 * a callback used with hat_unload_callback()
 * start and end mark are set to a range of unloaded addresses
 * and the function is invoked with a pointer to this data structure
 */
typedef struct hat_callback {
	caddr_t	hcb_start_addr;
	caddr_t	hcb_end_addr;
	void	(*hcb_function)(struct hat_callback *);
	void	*hcb_data;
} hat_callback_t;
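/*
 * Example (an illustrative sketch only; my_unload_cb, record_range and
 * my_state are hypothetical names, not part of this interface): a caller
 * that wants to know which address ranges were actually unloaded can hand
 * a callback to hat_unload_callback():
 *
 *	static void
 *	my_unload_cb(hat_callback_t *cb)
 *	{
 *		record_range(cb->hcb_data, cb->hcb_start_addr,
 *		    cb->hcb_end_addr);
 *	}
 *
 *	hat_callback_t cb;
 *
 *	cb.hcb_function = my_unload_cb;
 *	cb.hcb_data = my_state;
 *	hat_unload_callback(as->a_hat, addr, len, HAT_UNLOAD, &cb);
 */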
typedef void *hat_region_cookie_t;

#ifdef	_KERNEL

/*
 * One time hat initialization
 */
void	hat_init(void);

/*
 * Notify hat of a system dump
 */
void	hat_dump(void);
/*
 * Operations on an address space:
 *
 * struct hat *hat_alloc(as)
 *	allocates a hat structure for as.
 *
 * void hat_free_start(hat)
 *	informs hat layer that the process has finished executing but as has
 *	not been cleaned up yet.
 *
 * void hat_free_end(hat)
 *	informs hat layer that as is being destroyed.  hat layer cannot use
 *	the as pointer after this call.
 *
 * void hat_swapin(hat)
 *	allocate any hat resources required for process being swapped in.
 *
 * void hat_swapout(hat)
 *	deallocate hat resources for process being swapped out.
 *
 * size_t hat_get_mapped_size(hat)
 *	returns number of bytes that have valid mappings in hat.
 *
 * void hat_stats_enable(hat)
 * void hat_stats_disable(hat)
 *	enables/disables collection of stats for hat.
 *
 * int hat_dup(parenthat, childhat, addr, len, flags)
 *	Duplicate address translations of the parent to the child.  Supports
 *	the entire address range or a sub-range, depending on flags.
 *	Zero is returned on success, non-zero on error.
 *
 * void hat_thread_exit(thread)
 *	Notifies the HAT that a thread is exiting, called after it has been
 *	reassigned to the kernel AS.
 */
struct hat *hat_alloc(struct as *);
void	hat_free_start(struct hat *);
void	hat_free_end(struct hat *);
int	hat_dup(struct hat *, struct hat *, caddr_t, size_t, uint_t);
void	hat_swapin(struct hat *);
void	hat_swapout(struct hat *);
size_t	hat_get_mapped_size(struct hat *);
int	hat_stats_enable(struct hat *);
void	hat_stats_disable(struct hat *);
void	hat_thread_exit(kthread_t *);
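/*
 * Example (an illustrative sketch only, not the authoritative call
 * sequence; "as", "hat" and "newhat" stand in for the caller's variables):
 * the as layer drives a hat through roughly this life cycle:
 *
 *	hat = hat_alloc(as);					create
 *	(void) hat_dup(hat, newhat, NULL, 0, HAT_DUP_ALL);	fork
 *	hat_free_start(hat);					teardown begins
 *	hat_free_end(hat);					as is destroyed
 */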
/*
 * Operations on a named address within a segment:
 *
 * void hat_memload(hat, addr, pp, attr, flags)
 *	load/lock the given page struct
 *
 * void hat_memload_array(hat, addr, len, ppa, attr, flags)
 *	load/lock the given array of page structs
 *
 * void hat_devload(hat, addr, len, pf, attr, flags)
 *	load/lock the given page frame number
 *
 * void hat_unlock(hat, addr, len)
 *	unlock a given range of addresses
 *
 * void hat_unload(hat, addr, len, flags)
 * void hat_unload_callback(hat, addr, len, flags, callback)
 *	unload a given range of addresses (has optional callback)
 *
 * void hat_sync(hat, addr, len, flags)
 *	synchronize mapping with software data structures
 *
 * void hat_map(hat, addr, len, flags)
 *
 * void hat_setattr(hat, addr, len, attr)
 * void hat_clrattr(hat, addr, len, attr)
 * void hat_chgattr(hat, addr, len, attr)
 *	modify attributes for a range of addresses.  skips any invalid
 *	mappings
 *
 * uint_t hat_getattr(hat, addr, *attr)
 *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
 *	mapping and *attr is valid, nonzero if there was no mapping and
 *	*attr is not valid.
 *
 * size_t hat_getpagesize(hat, addr)
 *	returns pagesize in bytes for <hat, addr>.  returns -1 if there is
 *	no mapping.  This is an advisory call.
 *
 * pfn_t hat_getpfnum(hat, addr)
 *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
 *
 * int hat_probe(hat, addr)
 *	return 0 if no valid mapping is present.  Faster version
 *	of hat_getattr in certain architectures.
 *
 * int hat_share(dhat, daddr, shat, saddr, len, szc)
 *
 * void hat_unshare(hat, addr, len, szc)
 *
 * void hat_chgprot(hat, addr, len, vprot)
 *	This is a deprecated call.  New segment drivers should store
 *	all attributes and use hat_*attr calls.
 *	Change the protections in the virtual address range
 *	given to the specified virtual protection.  If vprot is ~PROT_WRITE,
 *	then remove write permission, leaving the other permissions
 *	unchanged.  If vprot is ~PROT_USER, remove user permissions.
 *
 * void hat_flush_range(hat, addr, size)
 *	Invalidate a virtual address translation for the local CPU.
 */
void	hat_memload(struct hat *, caddr_t, struct page *, uint_t, uint_t);
void	hat_memload_array(struct hat *, caddr_t, size_t, struct page **,
	    uint_t, uint_t);
void	hat_memload_region(struct hat *, caddr_t, struct page *, uint_t,
	    uint_t, hat_region_cookie_t);
void	hat_memload_array_region(struct hat *, caddr_t, size_t, struct page **,
	    uint_t, uint_t, hat_region_cookie_t);

void	hat_devload(struct hat *, caddr_t, size_t, pfn_t, uint_t, int);

void	hat_unlock(struct hat *, caddr_t, size_t);
void	hat_unlock_region(struct hat *, caddr_t, size_t, hat_region_cookie_t);

void	hat_unload(struct hat *, caddr_t, size_t, uint_t);
void	hat_unload_callback(struct hat *, caddr_t, size_t, uint_t,
	    hat_callback_t *);
void	hat_flush_range(struct hat *, caddr_t, size_t);
void	hat_sync(struct hat *, caddr_t, size_t, uint_t);
void	hat_map(struct hat *, caddr_t, size_t, uint_t);
void	hat_setattr(struct hat *, caddr_t, size_t, uint_t);
void	hat_clrattr(struct hat *, caddr_t, size_t, uint_t);
void	hat_chgattr(struct hat *, caddr_t, size_t, uint_t);
uint_t	hat_getattr(struct hat *, caddr_t, uint_t *);
ssize_t	hat_getpagesize(struct hat *, caddr_t);
pfn_t	hat_getpfnum(struct hat *, caddr_t);
int	hat_probe(struct hat *, caddr_t);
int	hat_share(struct hat *, caddr_t, struct hat *, caddr_t, size_t, uint_t);
void	hat_unshare(struct hat *, caddr_t, size_t, uint_t);
void	hat_chgprot(struct hat *, caddr_t, size_t, uint_t);
void	hat_reserve(struct as *, caddr_t, size_t);
pfn_t	va_to_pfn(void *);
uint64_t va_to_pa(void *);
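/*
 * Example (an illustrative sketch only; "as", "addr" and "pp" come from
 * the caller): a segment driver resolving a fault typically loads a
 * translation for the page it looked up, and may later query or unload
 * it:
 *
 *	hat_memload(as->a_hat, addr, pp, PROT_READ | PROT_WRITE, HAT_LOAD);
 *
 *	pfn = hat_getpfnum(as->a_hat, addr);	PFN_INVALID if unmapped
 *
 *	hat_unload(as->a_hat, addr, PAGESIZE, HAT_UNLOAD);
 */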
/*
 * Kernel Physical Mapping (segkpm) hat interface routines.
 */
caddr_t	hat_kpm_mapin(struct page *, struct kpme *);
void	hat_kpm_mapout(struct page *, struct kpme *, caddr_t);
caddr_t	hat_kpm_mapin_pfn(pfn_t);
void	hat_kpm_mapout_pfn(pfn_t);
caddr_t	hat_kpm_page2va(struct page *, int);
struct page *hat_kpm_vaddr2page(caddr_t);
int	hat_kpm_fault(struct hat *, caddr_t);
void	hat_kpm_mseghash_clear(int);
void	hat_kpm_mseghash_update(pgcnt_t, struct memseg *);
void	hat_kpm_addmem_mseg_update(struct memseg *, pgcnt_t, offset_t);
void	hat_kpm_addmem_mseg_insert(struct memseg *);
void	hat_kpm_addmem_memsegs_update(struct memseg *);
caddr_t	hat_kpm_mseg_reuse(struct memseg *);
void	hat_kpm_delmem_mseg_update(struct memseg *, struct memseg **);
void	hat_kpm_split_mseg_update(struct memseg *, struct memseg **,
	    struct memseg *, struct memseg *, struct memseg *);
void	hat_kpm_walk(void (*)(void *, void *, size_t), void *);
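/*
 * Example (an illustrative sketch only): on platforms that support segkpm,
 * a held page can be accessed through its permanent kernel mapping without
 * an explicit hat_memload(); passing a NULL kpme is assumed acceptable
 * here for a short-lived mapping:
 *
 *	caddr_t va = hat_kpm_mapin(pp, NULL);
 *	... read or write the page contents through va ...
 *	hat_kpm_mapout(pp, NULL, va);
 */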
/*
 * Operations on all translations for a given page(s)
 *
 * void hat_page_setattr(pp, flag)
 * void hat_page_clrattr(pp, flag)
 *	used to set/clr ref/mod bits.
 *
 * uint hat_page_getattr(pp, flag)
 *	If flag is specified, returns 0 if attribute is disabled
 *	and non zero if enabled.  If flag specifies multiple attributes
 *	then returns 0 if ALL attributes are disabled.  This is an advisory
 *	call.
 *
 * int hat_pageunload(pp, forceflag)
 *	unload all translations attached to pp.
 *
 * uint_t hat_pagesync(pp, flags)
 *	get hw stats from hardware into page struct and reset hw stats
 *	returns attributes of page
 *
 * ulong_t hat_page_getshare(pp)
 *	returns approx number of mappings to this pp.  A return of 0 implies
 *	there are no mappings to the page.
 *
 * faultcode_t hat_softlock(hat, addr, lenp, ppp, flags);
 *	called to softlock pages for zero copy tcp
 *
 * void hat_page_demote(pp);
 *	unload all large mappings to pp and decrease p_szc of all
 *	constituent pages according to the remaining mappings.
 */
void	hat_page_setattr(struct page *, uint_t);
void	hat_page_clrattr(struct page *, uint_t);
uint_t	hat_page_getattr(struct page *, uint_t);
int	hat_pageunload(struct page *, uint_t);
uint_t	hat_pagesync(struct page *, uint_t);
ulong_t	hat_page_getshare(struct page *);
int	hat_page_checkshare(struct page *, ulong_t);
faultcode_t hat_softlock(struct hat *, caddr_t, size_t *,
	    struct page **, uint_t);
void	hat_page_demote(struct page *);
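/*
 * Example (an illustrative sketch only): a pageout-style caller that wants
 * the accumulated ref/mod state and then wants the page unmapped from all
 * address spaces might do:
 *
 *	uint_t attr = hat_pagesync(pp, HAT_SYNC_ZERORM);   collect and clear
 *	if (attr & P_MOD)
 *		... the page must be written back before reuse ...
 *	(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
 */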
/*
 * Routine to expose supported HAT features to PIM.
 */
enum hat_features {
	HAT_SHARED_PT,		/* Shared page tables */
	HAT_DYNAMIC_ISM_UNMAP,	/* hat_pageunload() handles ISM pages */
	HAT_VMODSORT,		/* support for VMODSORT flag of vnode */
	HAT_SHARED_REGIONS	/* shared regions support */
};

int hat_supported(enum hat_features, void *);
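/*
 * Example (an illustrative sketch only): platform independent code can
 * probe for a feature before depending on it:
 *
 *	if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) {
 *		... rely on hat_pageunload() handling ISM pages ...
 *	}
 */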
/*
 * Services provided to the hat:
 *
 * void as_signal_proc(as, siginfo)
 *	deliver signal to all processes that have this as.
 *
 * int hat_setstat(as, addr, len, rmbits)
 *	informs hatstat layer that ref/mod bits need to be updated for
 *	address range.  Returns 0 on success, 1 for failure.
 */
void	as_signal_proc(struct as *, k_siginfo_t *siginfo);
void	hat_setstat(struct as *, caddr_t, size_t, uint_t);
/*
 * Flags to pass to hat routines.
 *
 * Certain flags only apply to some interfaces:
 *
 *	HAT_LOAD	Default flags to load a translation to the page.
 *	HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
 *			and hat_devload().
 *	HAT_LOAD_ADV	Advisory load - Load translation if and only if
 *			sufficient MMU resources exist (i.e., do not steal).
 *	HAT_LOAD_SHARE	A flag to hat_memload() to indicate h/w page tables
 *			that map some user pages (not kas) are shared by more
 *			than one process (eg. ISM).
 *	HAT_LOAD_CONTIG	Pages are contiguous
 *	HAT_LOAD_NOCONSIST Do not add mapping to mapping list.
 *	HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
 *	HAT_RELOAD_SHARE Reload a shared page table entry.  Some platforms
 *			may require different actions than on the first
 *			load of a shared mapping.
 *	HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
 *			point, it's setting up mapping to allocate internal
 *			hat layer data structures.  This flag forces hat layer
 *			to tap its reserves in order to prevent infinite
 *			recursion.
 *	HAT_LOAD_TEXT	A flag to hat_memload() to indicate loading text pages.
 */
/*
 * Flags for hat_memload/hat_devload
 */
#define	HAT_FLAGS_RESV		0xFF000000	/* resv for hat impl */
#define	HAT_LOAD		0x00
#define	HAT_LOAD_LOCK		0x01
#define	HAT_LOAD_ADV		0x04
#define	HAT_LOAD_CONTIG		0x10
#define	HAT_LOAD_NOCONSIST	0x20
#define	HAT_LOAD_SHARE		0x40
#define	HAT_LOAD_REMAP		0x80
#define	HAT_RELOAD_SHARE	0x100
#define	HAT_NO_KALLOC		0x200
#define	HAT_LOAD_TEXT		0x400
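/*
 * Example (an illustrative sketch only; "va" and "pp" come from the
 * caller, and the use of kas.a_hat is just for illustration): a kernel
 * subsystem that needs the translation to stay resident while it operates
 * on the page can combine the locking and consistency flags, and must
 * undo the lock when unloading:
 *
 *	hat_memload(kas.a_hat, va, pp, PROT_READ | PROT_WRITE,
 *	    HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
 *	...
 *	hat_unload(kas.a_hat, va, PAGESIZE, HAT_UNLOAD_UNLOCK);
 */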
/*
 * Flags for initializing disable_*large_pages.
 *
 *	HAT_AUTO_TEXT	Get MMU specific disable_auto_text_large_pages
 *	HAT_AUTO_DATA	Get MMU specific disable_auto_data_large_pages
 */
#define	HAT_AUTO_TEXT		0x800
#define	HAT_AUTO_DATA		0x1000

/*
 * Attributes for hat_memload/hat_devload/hat_*attr
 * are a superset of prot flags defined in mman.h.
 */
#define	HAT_PLAT_ATTR_MASK	0xF00000
#define	HAT_PROT_MASK		0x0F

#define	HAT_NOFAULT		0x10
#define	HAT_NOSYNC		0x20
/*
 * Advisory ordering attributes.  Apply only to device mappings.
 *
 * HAT_STRICTORDER: the CPU must issue the references in order, as the
 *	programmer specified.  This is the default.
 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
 *	of reordering; store or load with store or load).
 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
 *	to consecutive locations (for example, turn two consecutive byte
 *	stores into one halfword store), and it may batch individual loads
 *	(for example, turn two consecutive byte loads into one halfword load).
 *	This also implies re-ordering.
 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
 *	until another store occurs.  The default is to fetch new data
 *	on every load.  This also implies merging.
 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
 *	the device (perhaps with other data) at a later time.  The default is
 *	to push the data right away.  This also implies load caching.
 */
#define	HAT_STRICTORDER		0x0000
#define	HAT_UNORDERED_OK	0x0100
#define	HAT_MERGING_OK		0x0200
#define	HAT_LOADCACHING_OK	0x0300
#define	HAT_STORECACHING_OK	0x0400
#define	HAT_ORDER_MASK		0x0700

/* endian attributes */
#define	HAT_NEVERSWAP		0x0000
#define	HAT_STRUCTURE_BE	0x1000
#define	HAT_STRUCTURE_LE	0x2000
#define	HAT_ENDIAN_MASK		0x3000
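/*
 * Example (an illustrative sketch only; "kva" and "pfn" are supplied by
 * the caller, and the use of kas.a_hat is illustrative): a mapping of
 * device registers usually wants the strictest ordering and a locked
 * translation:
 *
 *	hat_devload(kas.a_hat, kva, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE | HAT_STRICTORDER,
 *	    HAT_LOAD_LOCK);
 */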
/* flags for hat_softlock */
#define	HAT_COW			0x0001

/*
 * Flags for hat_unload
 */
#define	HAT_UNLOAD		0x00
#define	HAT_UNLOAD_NOSYNC	0x02
#define	HAT_UNLOAD_UNLOCK	0x04
#define	HAT_UNLOAD_OTHER	0x08
#define	HAT_UNLOAD_UNMAP	0x10

/*
 * Flags for hat_pagesync, hat_getstat, hat_sync
 */
#define	HAT_SYNC_DONTZERO	0x00
#define	HAT_SYNC_ZERORM		0x01
/* Additional flags for hat_pagesync */
#define	HAT_SYNC_STOPON_REF	0x02
#define	HAT_SYNC_STOPON_MOD	0x04
#define	HAT_SYNC_STOPON_RM	(HAT_SYNC_STOPON_REF | HAT_SYNC_STOPON_MOD)
#define	HAT_SYNC_STOPON_SHARED	0x08

/*
 * Flags for hat_dup
 *
 * HAT_DUP_ALL	dup entire address space
 * HAT_DUP_COW	dup plus hat_clrattr(..PROT_WRITE) on newas
 */
#define	HAT_DUP_ALL		1
#define	HAT_DUP_COW		2
#define	HAT_DUP_SRD		3

/*
 * Flags for hat_map
 */
#define	HAT_MAP			0x00

/*
 * Flag for hat_pageunload
 */
#define	HAT_ADV_PGUNLOAD	0x00
#define	HAT_FORCE_PGUNLOAD	0x01
/*
 * Attributes for hat_page_*attr, hat_setstats and
 * returned by hat_pagesync.
 */
#define	P_MOD	0x1		/* the modified bit */
#define	P_REF	0x2		/* the referenced bit */
#define	P_RO	0x4		/* Read only page */
#define	P_NSH	0x8		/* Not to shuffle v_pages */

#define	hat_ismod(pp)		(hat_page_getattr(pp, P_MOD))
#define	hat_isref(pp)		(hat_page_getattr(pp, P_REF))
#define	hat_isro(pp)		(hat_page_getattr(pp, P_RO))

#define	hat_setmod(pp)		(hat_page_setattr(pp, P_MOD))
#define	hat_setmod_only(pp)	(hat_page_setattr(pp, P_MOD|P_NSH))
#define	hat_setref(pp)		(hat_page_setattr(pp, P_REF))
#define	hat_setrefmod(pp)	(hat_page_setattr(pp, P_REF|P_MOD))

#define	hat_clrmod(pp)		(hat_page_clrattr(pp, P_MOD))
#define	hat_clrref(pp)		(hat_page_clrattr(pp, P_REF))
#define	hat_clrrefmod(pp)	(hat_page_clrattr(pp, P_REF|P_MOD))

#define	hat_page_is_mapped(pp)	(hat_page_getshare(pp))
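/*
 * Example (an illustrative sketch only): a typical check-and-clear
 * sequence built from the convenience macros above:
 *
 *	int was_dirty = hat_ismod(pp);
 *
 *	hat_clrrefmod(pp);
 *	if (was_dirty)
 *		... arrange for the page to be written back ...
 */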
/*
 * hat_setup is being used in sparc/os/sundep.c
 */
void	hat_setup(struct hat *, int);

/*
 * Flags for hat_setup
 */
#define	HAT_DONTALLOC	0
#define	HAT_ALLOC	1
#define	HAT_INIT	2
/*
 * Other routines, for statistics
 */
int	hat_startstat(struct as *);
void	hat_getstat(struct as *, caddr_t, size_t, uint_t, char *, int);
void	hat_freestat(struct as *, int);
void	hat_resvstat(size_t, struct as *, caddr_t);
/*
 * Relocation callback routines.  Currently only sfmmu HAT supports
 * these.
 */
extern int	hat_add_callback(id_t, caddr_t, uint_t, uint_t, void *,
	pfn_t *, void **);
extern id_t	hat_register_callback(int,
	int (*prehandler)(caddr_t, uint_t, uint_t, void *),
	int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
	int (*errhandler)(caddr_t, uint_t, uint_t, void *), int);
extern void	hat_delete_callback(caddr_t, uint_t, void *, uint_t, void *);

/*
 * hat_add_callback()/hat_delete_callback() flags.
 */
#define	HAC_NOSLEEP	0x0
#define	HAC_SLEEP	0x1
#define	HAC_PAGELOCK	0x2

/*
 * Suspend/unsuspend handler callback arguments.
 */
#define	HAT_SUSPEND		0x0010
#define	HAT_UNSUSPEND		0x0010
#define	HAT_PRESUSPEND		0x0020
#define	HAT_POSTUNSUSPEND	0x0020

/*
 * Error handler callback arguments.  See the block comments
 * before the implementation of hat_add_callback() for an
 * explanation of what these mean.
 */
#define	HAT_CB_ERR_LEAKED	0x1
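/*
 * Example (an illustrative sketch only; the my_* handlers, "key", "va",
 * "len" and "arg" are hypothetical): a client that must quiesce before
 * one of its mapped pages is relocated and refresh its state afterwards
 * might register and attach callbacks like this:
 *
 *	static int my_pre(caddr_t va, uint_t len, uint_t flag, void *arg);
 *	static int my_post(caddr_t va, uint_t len, uint_t flag, void *arg,
 *	    pfn_t newpfn);
 *
 *	id_t id = hat_register_callback(key, my_pre, my_post, NULL, 0);
 *	pfn_t pfn;
 *	void *cookie;
 *
 *	(void) hat_add_callback(id, va, len, HAC_SLEEP | HAC_PAGELOCK,
 *	    arg, &pfn, &cookie);
 *	...
 *	hat_delete_callback(va, len, arg, HAC_PAGELOCK, cookie);
 */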
#endif	/* _KERNEL */
/*
 * The size of the bit array for ref and mod bit storage must be a power of 2.
 * 2 bits are collected for each page.  Below the power used is 4,
 * which is 16 8-bit characters = 128 bits, ref and mod bit information
 * for 64 pages.
 */
#define	HRM_SHIFT		4
#define	HRM_BYTES		(1 << HRM_SHIFT)
#define	HRM_PAGES		((HRM_BYTES * NBBY) / 2)
#define	HRM_PGPERBYTE		(NBBY/2)
#define	HRM_PGBYTEMASK		(HRM_PGPERBYTE-1)

#define	HRM_PGOFFMASK		((HRM_PGPERBYTE-1) << MMU_PAGESHIFT)
#define	HRM_BASEOFFSET		(((MMU_PAGESIZE * HRM_PAGES) - 1))
#define	HRM_BASEMASK		(~(HRM_BASEOFFSET))

#define	HRM_BASESHIFT		(MMU_PAGESHIFT + (HRM_SHIFT + 2))
#define	HRM_PAGEMASK		(MMU_PAGEMASK ^ HRM_BASEMASK)

#define	HRM_HASHSIZE		0x200
#define	HRM_HASHMASK		(HRM_HASHSIZE - 1)

#define	HRM_BLIST_INCR		0x200
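/*
 * Worked out for the default HRM_SHIFT of 4, assuming NBBY == 8 and 4K
 * MMU pages (the span doubles on 8K-page machines):
 *
 *	HRM_BYTES     = 1 << 4         = 16 bytes of bit storage per block
 *	HRM_PAGES     = (16 * 8) / 2   = 64 pages covered per block
 *	HRM_PGPERBYTE = 8 / 2          = 4 pages per byte
 *	HRM_BASESHIFT = 12 + (4 + 2)   = 18, so each block spans
 *	    64 * 4096 = 256K of virtual address space
 */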
/*
 * The structure for maintaining referenced and modified information
 */
struct hrmstat {
	struct as	*hrm_as;	/* stat block belongs to this as */
	uintptr_t	hrm_base;	/* base of block */
	ushort_t	hrm_id;		/* opaque identifier, one of a_vbits */
	struct hrmstat	*hrm_anext;	/* as statistics block list */
	struct hrmstat	*hrm_hnext;	/* list for hashed blocks */
	uchar_t		hrm_bits[HRM_BYTES];	/* the ref and mod bits */
};

extern struct hrmstat **hrm_hashtab;
/*
 * For global monitoring of the reference and modified bits
 * of all address spaces we reserve one id bit.
 */
#define	HRM_SWSMONID	1
#ifdef	_KERNEL

/*
 * Hat locking functions
 * XXX - these two functions are currently being used by hatstats
 * they can be removed by using a per-as mutex for hatstats.
 */
void	hat_enter(struct hat *);
void	hat_exit(struct hat *);
typedef void (*hat_rgn_cb_func_t)(caddr_t, caddr_t, caddr_t,
    size_t, void *, u_offset_t);

void	hat_join_srd(struct hat *, vnode_t *);

hat_region_cookie_t	hat_join_region(struct hat *, caddr_t, size_t, void *,
			    u_offset_t, uchar_t, uchar_t, hat_rgn_cb_func_t,
			    uint_t);
void	hat_leave_region(struct hat *, hat_region_cookie_t,
	    uint_t);
void	hat_dup_region(struct hat *, hat_region_cookie_t);

#define	HAT_INVALID_REGION_COOKIE	((hat_region_cookie_t)-1)
#define	HAT_IS_REGION_COOKIE_VALID(c)	((c) != HAT_INVALID_REGION_COOKIE)

/* hat_join_region() flags */

#define	HAT_REGION_TEXT	0x1	/* passed by segvn */
#define	HAT_REGION_ISM	0x2	/* for hat_share()/hat_unshare() */

#define	HAT_REGION_TYPE_MASK	(0x7)
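/*
 * Example (an illustrative sketch only; every name below other than the
 * hat routines and flags is assumed to come from the segment driver): a
 * driver mapping a shared text object can join a region, keep the cookie,
 * and leave the region when the mapping goes away:
 *
 *	hat_region_cookie_t rc;
 *
 *	rc = hat_join_region(as->a_hat, seg_base, seg_size, vp, off,
 *	    prot, pgszc, my_region_cb, HAT_REGION_TEXT);
 *	...
 *	if (HAT_IS_REGION_COOKIE_VALID(rc))
 *		hat_leave_region(as->a_hat, rc, HAT_REGION_TEXT);
 */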
#endif	/* _KERNEL */

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_HAT_H */