/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright 2013 OmniTI Computer Consulting, Inc. All rights reserved. */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

#include <sys/types.h>
#include <sys/inttypes.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/var.h>
#include <sys/proc.h>
#include <sys/tuneable.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/cred.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/vm.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/vmparam.h>
#include <sys/fcntl.h>
#include <sys/lwpchan_impl.h>
#include <sys/nbmlock.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_dev.h>
#include <vm/seg_vn.h>

int use_brk_lpg = 1;
int use_stk_lpg = 1;

static int brk_lpg(caddr_t nva);
static int grow_lpg(caddr_t sp);

int
brk(caddr_t nva)
{
	int error;
	proc_t *p = curproc;

	/*
	 * Serialize brk operations on an address space.
	 * This also serves as the lock protecting p_brksize
	 * and p_brkpageszc.
	 */
	as_rangelock(p->p_as);
	if (use_brk_lpg && (p->p_flag & SAUTOLPG) != 0) {
		error = brk_lpg(nva);
	} else {
		error = brk_internal(nva, p->p_brkpageszc);
	}
	as_rangeunlock(p->p_as);
	return ((error != 0 ? set_errno(error) : 0));
}
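
/*
 * For illustration (not part of the original file): userland reaches brk()
 * above through brk(2) and sbrk(3C). A minimal sketch using the standard
 * <unistd.h> interfaces, error handling elided:
 *
 *	char *cur = sbrk(0);		// current break
 *	if (brk(cur + 8192) == 0) {
 *		// two new zero-fill pages now back [cur, cur + 8192)
 *	}
 */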

/*
 * Algorithm: call arch-specific map_pgsz to get best page size to use,
 * then call brk_internal().
 * Returns 0 on success.
 */
static int
brk_lpg(caddr_t nva)
{
	struct proc *p = curproc;
	size_t pgsz, len;
	caddr_t addr, brkend;
	caddr_t bssbase = p->p_bssbase;
	caddr_t brkbase = p->p_brkbase;
	int oszc, szc;
	int err;

	oszc = p->p_brkpageszc;

	/*
	 * If p_brkbase has not yet been set, the first call
	 * to brk_internal() will initialize it.
	 */
	if (brkbase == 0) {
		return (brk_internal(nva, oszc));
	}

	len = nva - bssbase;

	pgsz = map_pgsz(MAPPGSZ_HEAP, p, bssbase, len, 0);
	szc = page_szc(pgsz);

	/*
	 * Covers two cases:
	 * 1. page_szc() returns -1 for invalid page size, so we want to
	 * ignore it in that case.
	 * 2. By design we never decrease page size, as it is more stable.
	 */
	if (szc <= oszc) {
		err = brk_internal(nva, oszc);
		/* If failed, back off to base page size. */
		if (err != 0 && oszc != 0) {
			err = brk_internal(nva, 0);
		}
		return (err);
	}

	err = brk_internal(nva, szc);
	/* If using szc failed, map with base page size and return. */
	if (err != 0) {
		if (szc != 0) {
			err = brk_internal(nva, 0);
		}
		return (err);
	}

	/*
	 * Round up brk base to a large page boundary and remap
	 * anything in the segment already faulted in beyond that
	 * point.
	 */
	addr = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase, pgsz);
	brkend = brkbase + p->p_brksize;
	len = brkend - addr;
	/* Check that len is not negative. Update page size code for heap. */
	if (addr >= p->p_bssbase && brkend > addr && IS_P2ALIGNED(len, pgsz)) {
		(void) as_setpagesize(p->p_as, addr, len, szc, B_FALSE);
		p->p_brkpageszc = szc;
	}

	ASSERT(err == 0);
	return (err);		/* should always be 0 */
}
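
/*
 * Worked example (illustrative numbers, not from the original file): with a
 * 4 MB large page (pgsz == 0x400000) and p_bssbase == 0x08060000, P2ROUNDUP
 * yields addr == 0x08400000, so only heap already faulted in at or beyond
 * that boundary is eligible for the as_setpagesize() remap above.
 */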

/*
 * Returns 0 on success.
 */
static int
brk_internal(caddr_t nva, uint_t brkszc)
{
	caddr_t ova;			/* current break address */
	size_t size;
	int error;
	struct proc *p = curproc;
	struct as *as = p->p_as;
	size_t pgsz;
	uint_t szc;
	rctl_qty_t as_rctl;

	/*
	 * extend heap to brkszc alignment but use current p->p_brkpageszc
	 * for the newly created segment. This allows the new extension
	 * segment to be concatenated successfully with the existing brk
	 * segment.
	 */
	if ((szc = brkszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
	} else {
		pgsz = PAGESIZE;
	}

	mutex_enter(&p->p_lock);
	as_rctl = rctl_enforced_value(rctlproc_legacy[RLIMIT_DATA],
	    p->p_rctls, p);
	mutex_exit(&p->p_lock);

	/*
	 * If p_brkbase has not yet been set, the first call
	 * to brk() will initialize it.
	 */
	if (p->p_brkbase == 0)
		p->p_brkbase = nva;

	/*
	 * Before multiple page size support existed p_brksize was the value
	 * not rounded to the pagesize (i.e. it stored the exact user request
	 * for heap size). If pgsz is greater than PAGESIZE calculate the
	 * heap size as the real new heap size by rounding it up to pgsz.
	 * This is useful since we may want to know where the heap ends
	 * without knowing heap pagesize (e.g. some old code) and also if
	 * heap pagesize changes we can update p_brkpageszc but delay adding
	 * new mapping yet still know from p_brksize where the heap really
	 * ends. The user-requested heap end is stored in a libc variable.
	 */
	if (pgsz > PAGESIZE) {
		caddr_t tnva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
		size = tnva - p->p_brkbase;
		if (tnva < p->p_brkbase || (size > p->p_brksize &&
		    size > (size_t)as_rctl)) {
			szc = 0;
			pgsz = PAGESIZE;
			size = nva - p->p_brkbase;
		}
	} else {
		size = nva - p->p_brkbase;
	}

	/*
	 * use PAGESIZE to round up ova because we want to know the real value
	 * of the current heap end in case p_brkpageszc changes since the last
	 * p_brksize was computed.
	 */
	nva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
	ova = (caddr_t)P2ROUNDUP((uintptr_t)(p->p_brkbase + p->p_brksize),
	    PAGESIZE);

	if ((nva < p->p_brkbase) || (size > p->p_brksize &&
	    size > as_rctl)) {
		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_DATA], p->p_rctls, p,
		    RCA_SAFE);
		mutex_exit(&p->p_lock);
		return (ENOMEM);
	}

	if (nva > ova) {
		struct segvn_crargs crargs =
		    SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

		if (!(p->p_datprot & PROT_EXEC)) {
			crargs.prot &= ~PROT_EXEC;
		}

		/*
		 * Add new zfod mapping to extend UNIX data segment
		 * AS_MAP_NO_LPOOB means use 0, and don't reapply OOB policies
		 * via map_pgszcvec(). Use AS_MAP_HEAP to get intermediate
		 * page sizes if ova is not aligned to szc's pgsz.
		 */
		if (szc > 0) {
			caddr_t rbss;

			rbss = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase,
			    pgsz);
			if (IS_P2ALIGNED(p->p_bssbase, pgsz) || ova > rbss) {
				crargs.szc = p->p_brkpageszc ? p->p_brkpageszc :
				    AS_MAP_NO_LPOOB;
			} else if (ova == rbss) {
				crargs.szc = szc;
			} else {
				crargs.szc = AS_MAP_HEAP;
			}
		} else {
			crargs.szc = AS_MAP_NO_LPOOB;
		}
		crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_UP;
		error = as_map(as, ova, (size_t)(nva - ova), segvn_create,
		    &crargs);
		if (error) {
			return (error);
		}
	} else if (nva < ova) {
		/*
		 * Release mapping to shrink UNIX data segment.
		 */
		(void) as_unmap(as, nva, (size_t)(ova - nva));
	}
	p->p_brksize = size;
	return (0);
}
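
/*
 * Worked example (illustrative numbers, not from the original file): with
 * pgsz == 2 MB (0x200000), p_brkbase == 0x08050000 and a requested
 * nva == 0x08125000, tnva rounds up to 0x08200000, so size records the
 * pgsz-rounded heap length while libc separately remembers the exact
 * break the user asked for.
 */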

/*
 * Grow the stack to include sp. Return 1 if successful, 0 otherwise.
 * This routine assumes that the stack grows downward.
 */
int
grow(caddr_t sp)
{
	struct proc *p = curproc;
	struct as *as = p->p_as;
	size_t oldsize = p->p_stksize;
	size_t newsize;
	int err;

	/*
	 * Serialize grow operations on an address space.
	 * This also serves as the lock protecting p_stksize
	 * and p_stkpageszc.
	 */
	as_rangelock(as);
	if (use_stk_lpg && (p->p_flag & SAUTOLPG) != 0) {
		err = grow_lpg(sp);
	} else {
		err = grow_internal(sp, p->p_stkpageszc);
	}
	as_rangeunlock(as);

	if (err == 0 && (newsize = p->p_stksize) > oldsize) {
		ASSERT(IS_P2ALIGNED(oldsize, PAGESIZE));
		ASSERT(IS_P2ALIGNED(newsize, PAGESIZE));
		/*
		 * Set up translations so the process doesn't have to fault in
		 * the stack pages we just gave it.
		 */
		(void) as_fault(as->a_hat, as, p->p_usrstack - newsize,
		    newsize - oldsize, F_INVAL, S_WRITE);
	}
	return ((err == 0 ? 1 : 0));
}
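
/*
 * Worked example (illustrative numbers, not from the original file): if the
 * stack grows from oldsize == 0x20000 to newsize == 0x30000, the as_fault()
 * call above pre-faults the newsize - oldsize == 0x10000 bytes at
 * [p_usrstack - 0x30000, p_usrstack - 0x20000), sparing the process
 * per-page faults on memory it was just granted.
 */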

/*
 * Algorithm: call arch-specific map_pgsz to get best page size to use,
 * then call grow_internal().
 * Returns 0 on success.
 */
static int
grow_lpg(caddr_t sp)
{
	struct proc *p = curproc;
	size_t pgsz;
	size_t len, newsize;
	caddr_t addr, saddr;
	caddr_t growend;
	int oszc, szc;
	int err;

	newsize = p->p_usrstack - sp;

	oszc = p->p_stkpageszc;
	pgsz = map_pgsz(MAPPGSZ_STK, p, sp, newsize, 0);
	szc = page_szc(pgsz);

	/*
	 * Covers two cases:
	 * 1. page_szc() returns -1 for invalid page size, so we want to
	 * ignore it in that case.
	 * 2. By design we never decrease page size, as it is more stable.
	 * This shouldn't happen as the stack never shrinks.
	 */
	if (szc <= oszc) {
		err = grow_internal(sp, oszc);
		/* failed, fall back to base page size */
		if (err != 0 && oszc != 0) {
			err = grow_internal(sp, 0);
		}
		return (err);
	}

	/*
	 * We've grown sufficiently to switch to a new page size.
	 * So we are going to remap the whole segment with the new page size.
	 */
	err = grow_internal(sp, szc);
	/* The grow with szc failed, so fall back to base page size. */
	if (err != 0) {
		if (szc != 0) {
			err = grow_internal(sp, 0);
		}
		return (err);
	}

	/*
	 * Round up stack pointer to a large page boundary and remap
	 * any pgsz pages in the segment already faulted in beyond that
	 * point.
	 */
	saddr = p->p_usrstack - p->p_stksize;
	addr = (caddr_t)P2ROUNDUP((uintptr_t)saddr, pgsz);
	growend = (caddr_t)P2ALIGN((uintptr_t)p->p_usrstack, pgsz);
	len = growend - addr;
	/* Check that len is not negative. Update page size code for stack. */
	if (addr >= saddr && growend > addr && IS_P2ALIGNED(len, pgsz)) {
		(void) as_setpagesize(p->p_as, addr, len, szc, B_FALSE);
		p->p_stkpageszc = szc;
	}

	ASSERT(err == 0);
	return (err);		/* should always be 0 */
}
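
/*
 * Worked example (illustrative numbers, not from the original file): with
 * p_usrstack == 0xFE000000, p_stksize == 0x500000 and pgsz == 4 MB,
 * saddr == 0xFDB00000 rounds up to addr == 0xFDC00000 and growend stays
 * 0xFE000000 (already aligned), so the 4 MB span [0xFDC00000, 0xFE000000)
 * is remapped to large pages.
 */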

/*
 * This routine assumes that the stack grows downward.
 * Returns 0 on success, errno on failure.
 */
int
grow_internal(caddr_t sp, uint_t growszc)
{
	struct proc *p = curproc;
	size_t newsize;
	size_t oldsize;
	uint_t szc;
	size_t pgsz;
	int error;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

	ASSERT(sp < p->p_usrstack);
	sp = (caddr_t)P2ALIGN((uintptr_t)sp, PAGESIZE);

	/*
	 * grow to growszc alignment but use current p->p_stkpageszc for
	 * the segvn_crargs szc passed to segvn_create. For memcntl to
	 * increase the szc, this allows the new extension segment to be
	 * concatenated successfully with the existing stack segment.
	 */
	if ((szc = growszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
		newsize = p->p_usrstack - (caddr_t)P2ALIGN((uintptr_t)sp, pgsz);
		if (newsize > (size_t)p->p_stk_ctl) {
			szc = 0;
			pgsz = PAGESIZE;
			newsize = p->p_usrstack - sp;
		}
	} else {
		pgsz = PAGESIZE;
		newsize = p->p_usrstack - sp;
	}

	if (newsize > (size_t)p->p_stk_ctl) {
		(void) rctl_action(rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p,
		    RCA_UNSAFE_ALL);

		return (ENOMEM);
	}

	oldsize = p->p_stksize;
	ASSERT(P2PHASE(oldsize, PAGESIZE) == 0);

	if (newsize <= oldsize) {	/* prevent the stack from shrinking */
		return (0);
	}

	if (!(p->p_stkprot & PROT_EXEC)) {
		crargs.prot &= ~PROT_EXEC;
	}
	/*
	 * extend stack with the proposed new growszc, which is different
	 * from p_stkpageszc only on a memcntl to increase the stack pagesize.
	 * AS_MAP_NO_LPOOB means use 0, and don't reapply OOB policies via
	 * map_pgszcvec(). Use AS_MAP_STACK to get intermediate page sizes
	 * if not aligned to szc's pgsz.
	 */
	if (szc > 0) {
		caddr_t oldsp = p->p_usrstack - oldsize;
		caddr_t austk = (caddr_t)P2ALIGN((uintptr_t)p->p_usrstack,
		    pgsz);

		if (IS_P2ALIGNED(p->p_usrstack, pgsz) || oldsp < austk) {
			crargs.szc = p->p_stkpageszc ? p->p_stkpageszc :
			    AS_MAP_NO_LPOOB;
		} else if (oldsp == austk) {
			crargs.szc = szc;
		} else {
			crargs.szc = AS_MAP_STACK;
		}
	} else {
		crargs.szc = AS_MAP_NO_LPOOB;
	}
	crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_DOWN;

	if ((error = as_map(p->p_as, p->p_usrstack - newsize, newsize - oldsize,
	    segvn_create, &crargs)) != 0) {
		if (error == EAGAIN) {
			cmn_err(CE_WARN, "Sorry, no swap space to grow stack "
			    "for pid %d (%s)", p->p_pid, PTOU(p)->u_comm);
		}
		return (error);
	}

	p->p_stksize = newsize;
	return (0);
}
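
/*
 * For illustration (not part of the original file): growszc differs from
 * p_stkpageszc when userland asks for a bigger stack page size via
 * memcntl(2). A sketch, error handling elided:
 *
 *	struct memcntl_mha mha;
 *	mha.mha_cmd = MHA_MAPSIZE_STACK;
 *	mha.mha_flags = 0;
 *	mha.mha_pagesize = 4 * 1024 * 1024;
 *	(void) memcntl(NULL, 0, MC_HAT_ADVISE, (caddr_t)&mha, 0, 0);
 */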

/*
 * Find address for user to map.
 * If MAP_FIXED is not specified, we can pick any address we want, but we will
 * first try the value in *addrp if it is non-NULL. Thus this is implementing
 * a way to try and get a preferred address.
 */
int
choose_addr(struct as *as, caddr_t *addrp, size_t len, offset_t off,
    int vacalign, uint_t flags)
{
	caddr_t basep = (caddr_t)(uintptr_t)((uintptr_t)*addrp & PAGEMASK);
	size_t lenp = len;

	ASSERT(AS_ISCLAIMGAP(as));	/* searches should be serialized */
	if (flags & MAP_FIXED) {
		(void) as_unmap(as, *addrp, len);
		return (0);
	} else if (basep != NULL && ((flags & MAP_ALIGN) == 0) &&
	    !as_gap(as, len, &basep, &lenp, 0, *addrp)) {
		/* User supplied address was available */
		*addrp = basep;
	} else {
		/*
		 * No user supplied address or the address supplied was not
		 * available.
		 */
		map_addr(addrp, len, off, vacalign, flags);
	}
	if (*addrp == NULL)
		return (ENOMEM);
	return (0);
}

/*
 * Used for MAP_ANON - fast way to get anonymous pages
 */
static int
zmap(struct as *as, caddr_t *addrp, size_t len, uint_t uprot, int flags,
    offset_t pos)
{
	struct segvn_crargs vn_a;
	int error;

	if (((PROT_ALL & uprot) != uprot))
		return (EACCES);

	if ((flags & MAP_FIXED) != 0) {
		caddr_t userlimit;

		/*
		 * Use the user address. First verify that
		 * the address to be used is page aligned.
		 * Then make some simple bounds checks.
		 */
		if (((uintptr_t)*addrp & PAGEOFFSET) != 0)
			return (EINVAL);

		userlimit = flags & _MAP_LOW32 ?
		    (caddr_t)USERLIMIT32 : as->a_userlimit;
		switch (valid_usr_range(*addrp, len, uprot, as, userlimit)) {
		case RANGE_OKAY:
			break;
		case RANGE_BADPROT:
			return (ENOTSUP);
		case RANGE_BADADDR:
		default:
			return (ENOMEM);
		}
	}
	/*
	 * No need to worry about vac alignment for anonymous
	 * pages since this is a "clone" object that doesn't
	 * yet exist.
	 */
	error = choose_addr(as, addrp, len, pos, ADDR_NOVACALIGN, flags);
	if (error != 0) {
		return (error);
	}

	/*
	 * Use the seg_vn segment driver; passing in the NULL amp
	 * gives the desired "cloning" effect.
	 */
	vn_a.vp = NULL;
	vn_a.offset = 0;
	vn_a.type = flags & MAP_TYPE;
	vn_a.prot = uprot;
	vn_a.maxprot = PROT_ALL;
	vn_a.flags = flags & ~MAP_TYPE;
	vn_a.cred = CRED();
	vn_a.amp = NULL;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	return (as_map(as, *addrp, len, segvn_create, &vn_a));
}
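
/*
 * For illustration (not part of the original file): this is the path an
 * anonymous mapping takes from userland, e.g. with standard mmap(2) usage:
 *
 *	void *p = mmap(NULL, 16384, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON, -1, 0);
 */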

static int
smmap_common(caddr_t *addrp, size_t len,
    int prot, int flags, struct file *fp, offset_t pos)
{
	struct vnode *vp;
	struct as *as = curproc->p_as;
	uint_t uprot, maxprot, type;
	int error;
	int in_crit = 0;

	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | _MAP_NEW |
	    _MAP_LOW32 | MAP_NORESERVE | MAP_ANON | MAP_ALIGN |
	    MAP_TEXT | MAP_INITDATA)) != 0) {
		/* | MAP_RENAME */	/* not implemented, let user know */
		return (EINVAL);
	}

	if ((flags & MAP_TEXT) && !(prot & PROT_EXEC)) {
		return (EINVAL);
	}

	if ((flags & (MAP_TEXT | MAP_INITDATA)) == (MAP_TEXT | MAP_INITDATA)) {
		return (EINVAL);
	}

	/*
	 * See if this is an "old mmap call". If so, remember this
	 * fact and convert the flags value given to mmap to indicate
	 * the specified address in the system call must be used.
	 * _MAP_NEW is set by all new uses of mmap.
	 */
	if ((flags & _MAP_NEW) == 0)
		flags |= MAP_FIXED;
	flags &= ~_MAP_NEW;

	type = flags & MAP_TYPE;
	if (type != MAP_PRIVATE && type != MAP_SHARED)
		return (EINVAL);

	if (flags & MAP_ALIGN) {
		if (flags & MAP_FIXED)
			return (EINVAL);

		/* alignment needs to be a power of 2 >= page size */
		if (((uintptr_t)*addrp < PAGESIZE && (uintptr_t)*addrp != 0) ||
		    !ISP2((uintptr_t)*addrp))
			return (EINVAL);
	}
	/*
	 * Check for bad lengths and file position.
	 * We let the VOP_MAP routine check for negative lengths
	 * since on some vnode types this might be appropriate.
	 */
	if (len == 0 || (pos & (u_offset_t)PAGEOFFSET) != 0)
		return (EINVAL);

	maxprot = PROT_ALL;		/* start out allowing all accesses */
	uprot = prot | PROT_USER;

	if (fp == NULL) {
		ASSERT(flags & MAP_ANON);
		/* discard lwpchan mappings, like munmap() */
		if ((flags & MAP_FIXED) && curproc->p_lcp != NULL)
			lwpchan_delete_mapping(curproc, *addrp, *addrp + len);
		as_rangelock(as);
		error = zmap(as, addrp, len, uprot, flags, pos);
		as_rangeunlock(as);
		/*
		 * Tell machine specific code that lwp has mapped shared memory
		 */
		if (error == 0 && (flags & MAP_SHARED)) {
			/* EMPTY */
			LWP_MMODEL_SHARED_AS(*addrp, len);
		}
		return (error);
	} else if ((flags & MAP_ANON) != 0)
		return (EINVAL);

	vp = fp->f_vnode;

	/* Can't execute code from "noexec" mounted filesystem. */
	if ((vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0)
		maxprot &= ~PROT_EXEC;

	/*
	 * These checks were added as part of large files.
	 *
	 * Return ENXIO if the initial position is negative; return EOVERFLOW
	 * if (offset + len) would overflow the maximum allowed offset for the
	 * type of file descriptor being used.
	 */
	if (vp->v_type == VREG) {
		if (pos < 0)
			return (ENXIO);
		if ((offset_t)len > (OFFSET_MAX(fp) - pos))
			return (EOVERFLOW);
	}

	if (type == MAP_SHARED && (fp->f_flag & FWRITE) == 0) {
		/* no write access allowed */
		maxprot &= ~PROT_WRITE;
	}

	/*
	 * XXX - Do we also adjust maxprot based on protections
	 * of the vnode? E.g. if no execute permission is given
	 * on the vnode for the current user, maxprot probably
	 * should disallow PROT_EXEC also? This is different
	 * from the write access as this would be a per vnode
	 * test as opposed to a per fd test for writability.
	 */

	/*
	 * Verify that the specified protections are not greater than
	 * the maximum allowable protections. Also test to make sure
	 * that the file descriptor allows for read access since
	 * "write only" mappings are hard to do since normally we do
	 * the read from the file before the page can be written.
	 */
	if (((maxprot & uprot) != uprot) || (fp->f_flag & FREAD) == 0)
		return (EACCES);

	/*
	 * If the user specified an address, do some simple checks here
	 */
	if ((flags & MAP_FIXED) != 0) {
		caddr_t userlimit;

		/*
		 * Use the user address. First verify that
		 * the address to be used is page aligned.
		 * Then make some simple bounds checks.
		 */
		if (((uintptr_t)*addrp & PAGEOFFSET) != 0)
			return (EINVAL);

		userlimit = flags & _MAP_LOW32 ?
		    (caddr_t)USERLIMIT32 : as->a_userlimit;
		switch (valid_usr_range(*addrp, len, uprot, as, userlimit)) {
		case RANGE_OKAY:
			break;
		case RANGE_BADPROT:
			return (ENOTSUP);
		case RANGE_BADADDR:
		default:
			return (ENOMEM);
		}
	}

	if ((prot & (PROT_READ | PROT_WRITE | PROT_EXEC)) &&
	    nbl_need_check(vp)) {
		int svmand;
		nbl_op_t nop;

		nbl_start_crit(vp, RW_READER);
		in_crit = 1;
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (error != 0)
			goto done;
		if ((prot & PROT_WRITE) && (type == MAP_SHARED)) {
			if (prot & (PROT_READ | PROT_EXEC)) {
				nop = NBL_READWRITE;
			} else {
				nop = NBL_WRITE;
			}
		} else {
			nop = NBL_READ;
		}
		if (nbl_conflict(vp, nop, 0, LONG_MAX, svmand, NULL)) {
			error = EACCES;
			goto done;
		}
	}

	/* discard lwpchan mappings, like munmap() */
	if ((flags & MAP_FIXED) && curproc->p_lcp != NULL)
		lwpchan_delete_mapping(curproc, *addrp, *addrp + len);

	/*
	 * Ok, now let the vnode map routine do its thing to set things up.
	 */
	error = VOP_MAP(vp, pos, as,
	    addrp, len, uprot, maxprot, flags, fp->f_cred, NULL);

	if (error == 0) {
		/*
		 * Tell machine specific code that lwp has mapped shared memory
		 */
		if (flags & MAP_SHARED) {
			/* EMPTY */
			LWP_MMODEL_SHARED_AS(*addrp, len);
		}
		if (vp->v_type == VREG &&
		    (flags & (MAP_TEXT | MAP_INITDATA)) != 0) {
			/*
			 * Mark this as an executable vnode
			 */
			mutex_enter(&vp->v_lock);
			vp->v_flag |= VVMEXEC;
			mutex_exit(&vp->v_lock);
		}
	}

done:
	if (in_crit)
		nbl_end_crit(vp);
	return (error);
}
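
/*
 * For illustration (not part of the original file): the FREAD test above is
 * why a "write only" file mapping fails from userland, e.g.
 *
 *	int fd = open("file", O_WRONLY);
 *	void *p = mmap(NULL, 8192, PROT_WRITE, MAP_SHARED, fd, 0);
 *	// fails with EACCES: the descriptor was not opened for reading
 */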

/*
 * LP64 mmap(2) system call: 64-bit offset, 64-bit address.
 *
 * The "large file" mmap routine mmap64(2) is also mapped to this routine
 * by the 64-bit version of libc.
 *
 * Eventually, this should be the only version, and have smmap_common()
 * folded back into it again. Some day.
 */
caddr_t
smmap64(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos)
{
	struct file *fp;
	int error;

	if (fd == -1 && (flags & MAP_ANON) != 0)
		error = smmap_common(&addr, len, prot, flags,
		    NULL, (offset_t)pos);
	else if ((fp = getf(fd)) != NULL) {
		error = smmap_common(&addr, len, prot, flags,
		    fp, (offset_t)pos);
		releasef(fd);
	} else
		error = EBADF;

	return (error ? (caddr_t)(uintptr_t)set_errno(error) : addr);
}

#if defined(_SYSCALL32_IMPL) || defined(_ILP32)

/*
 * ILP32 mmap(2) system call: 32-bit offset, 32-bit address.
 */
caddr_t
smmap32(caddr32_t addr, size32_t len, int prot, int flags, int fd, off32_t pos)
{
	struct file *fp;
	int error;
	caddr_t a = (caddr_t)(uintptr_t)addr;

	if (flags & _MAP_LOW32)
		error = EINVAL;
	else if (fd == -1 && (flags & MAP_ANON) != 0)
		error = smmap_common(&a, (size_t)len, prot,
		    flags | _MAP_LOW32, NULL, (offset_t)pos);
	else if ((fp = getf(fd)) != NULL) {
		error = smmap_common(&a, (size_t)len, prot,
		    flags | _MAP_LOW32, fp, (offset_t)pos);
		releasef(fd);
	} else
		error = EBADF;

	ASSERT(error != 0 || (uintptr_t)(a + len) < (uintptr_t)UINT32_MAX);

	return (error ? (caddr_t)(uintptr_t)set_errno(error) : a);
}

/*
 * ILP32 mmap64(2) system call: 64-bit offset, 32-bit address.
 *
 * Now things really get ugly because we can't use the C-style
 * calling convention for more than 6 args, and 64-bit parameter
 * passing on 32-bit systems is less than clean.
 */

struct mmaplf32a {
	caddr_t addr;
	size_t len;
#ifdef _LP64
	/*
	 * 32-bit contents, 64-bit cells
	 */
	uint64_t prot;
	uint64_t flags;
	uint64_t fd;
	uint64_t offhi;
	uint64_t offlo;
#else
	/*
	 * 32-bit contents, 32-bit cells
	 */
	uint32_t prot;
	uint32_t flags;
	uint32_t fd;
	uint32_t offhi;
	uint32_t offlo;
#endif
};

int
smmaplf32(struct mmaplf32a *uap, rval_t *rvp)
{
	struct file *fp;
	int error;
	caddr_t a = uap->addr;
	int flags = (int)uap->flags;
	int fd = (int)uap->fd;
#ifdef _BIG_ENDIAN
	offset_t off = ((u_offset_t)uap->offhi << 32) | (u_offset_t)uap->offlo;
#else
	offset_t off = ((u_offset_t)uap->offlo << 32) | (u_offset_t)uap->offhi;
#endif

	if (flags & _MAP_LOW32)
		error = EINVAL;
	else if (fd == -1 && (flags & MAP_ANON) != 0)
		error = smmap_common(&a, uap->len, (int)uap->prot,
		    flags | _MAP_LOW32, NULL, off);
	else if ((fp = getf(fd)) != NULL) {
		error = smmap_common(&a, uap->len, (int)uap->prot,
		    flags | _MAP_LOW32, fp, off);
		releasef(fd);
	} else
		error = EBADF;

	if (error == 0)
		rvp->r_val1 = (uintptr_t)a;
	return (error);
}

#endif	/* _SYSCALL32_IMPL || _ILP32 */
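
/*
 * Worked example (illustrative numbers, not from the original file): for a
 * file offset of 6 GB (0x180000000), 32-bit libc passes offhi == 0x1 and
 * offlo == 0x80000000, and the big-endian expression above reassembles
 * ((u_offset_t)0x1 << 32) | 0x80000000 == 0x180000000.
 */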

int
munmap(caddr_t addr, size_t len)
{
	struct proc *p = curproc;
	struct as *as = p->p_as;

	if (((uintptr_t)addr & PAGEOFFSET) != 0 || len == 0)
		return (set_errno(EINVAL));

	if (valid_usr_range(addr, len, 0, as, as->a_userlimit) != RANGE_OKAY)
		return (set_errno(EINVAL));

	/*
	 * Discard lwpchan mappings.
	 */
	if (p->p_lcp != NULL)
		lwpchan_delete_mapping(p, addr, addr + len);
	if (as_unmap(as, addr, len) != 0)
		return (set_errno(EINVAL));

	return (0);
}

int
mprotect(caddr_t addr, size_t len, int prot)
{
	struct as *as = curproc->p_as;
	uint_t uprot = prot | PROT_USER;
	int error;

	if (((uintptr_t)addr & PAGEOFFSET) != 0 || len == 0)
		return (set_errno(EINVAL));

	switch (valid_usr_range(addr, len, prot, as, as->a_userlimit)) {
	case RANGE_OKAY:
		break;
	case RANGE_BADPROT:
		return (set_errno(ENOTSUP));
	case RANGE_BADADDR:
	default:
		return (set_errno(ENOMEM));
	}

	error = as_setprot(as, addr, len, uprot);
	if (error)
		return (set_errno(error));
	return (0);
}

#define	MC_CACHE	128			/* internal result buffer */
#define	MC_QUANTUM	(MC_CACHE * PAGESIZE)	/* addresses covered in loop */

int
mincore(caddr_t addr, size_t len, char *vecp)
{
	struct as *as = curproc->p_as;
	caddr_t ea;			/* end address of loop */
	size_t rl;			/* inner result length */
	char vec[MC_CACHE];		/* local vector cache */
	int error;
	model_t model;
	long llen;

	model = get_udatamodel();
	/*
	 * Validate form of address parameters.
	 */
	if (model == DATAMODEL_NATIVE) {
		llen = (long)len;
	} else {
		llen = (int32_t)(size32_t)len;
	}
	if (((uintptr_t)addr & PAGEOFFSET) != 0 || llen <= 0)
		return (set_errno(EINVAL));

	if (valid_usr_range(addr, len, 0, as, as->a_userlimit) != RANGE_OKAY)
		return (set_errno(ENOMEM));

	/*
	 * Loop over subranges of interval [addr : addr + len), recovering
	 * results internally and then copying them out to caller. Subrange
	 * is based on the size of MC_CACHE, defined above.
	 */
	for (ea = addr + len; addr < ea; addr += MC_QUANTUM) {
		error = as_incore(as, addr,
		    (size_t)MIN(MC_QUANTUM, ea - addr), vec, &rl);
		if (rl != 0) {
			rl = (rl + PAGESIZE - 1) / PAGESIZE;
			if (copyout(vec, vecp, rl) != 0)
				return (set_errno(EFAULT));
			vecp += rl;
		}
		if (error != 0)
			return (set_errno(ENOMEM));
	}
	return (0);
}
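
/*
 * Worked example (illustrative numbers, not from the original file): with
 * 4 KB pages MC_QUANTUM is 128 * 4096 == 512 KB, so a 3 MB mincore()
 * request makes six passes through the loop above, each covering 128 pages
 * and copying out 128 result bytes.
 */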