/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap,
				    int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap,
			    int mode, int segnum);

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds *shmsegs;

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

/*
 * Per-process attach bookkeeping, one entry per allowed segment.  The
 * member layout here is reconstructed from how shmmap_s->va and
 * shmmap_s->shmid are used below.
 */
struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);

/*
 * Default limits.  The compile-time default values were not part of this
 * excerpt; the #ifndef fallbacks below are conventional values only and
 * can be overridden at build time or by the kern.ipc.* tunables declared
 * below.  shmmax and shmall are left at zero and sized in shminit().
 */
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	512
#endif
#ifndef SHMSEG
#define	SHMSEG	1024
#endif

struct shminfo shminfo = {
	.shmmin = SHMMIN,
	.shmmni = SHMMNI,
	.shmseg = SHMSEG,
};

static int shm_allow_removed;
static int shm_use_phys = 1;

TUNABLE_LONG("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_LONG("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_LONG("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_LONG("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
    "Max shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
    "Min shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0,
    "Max number of shared memory identifiers");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0,
    "Max shared memory segments per process");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
    "Max pages of shared memory");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0,
    "Use phys pager allocation instead of swap pager allocation");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0,
    "Enable/Disable attachment to attached segments marked for removal");

static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shmalloced; i++) {
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	}
	return -1;
}

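/*
 * Translate a shmid into its shmid_ds.  Returns NULL if the identifier
 * is out of range, not allocated, has a stale sequence number, or is
 * marked removed while kern.ipc.shm_allow_removed is disabled.
 */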
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	     (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid)) {
		return NULL;
	}
	return shmseg;
}

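/*
 * Release a segment's backing VM object and handle and return its slot
 * to the free pool.  Called once the attach count has dropped to zero.
 */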
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	kfree((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

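/*
 * Detach one mapping from a vmspace: unmap the region, clear the attach
 * record, and free the segment itself if this was the last attachment
 * and the segment is already marked for removal.
 */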
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

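/*
 * shmdt(2): detach the shared memory segment mapped at uap->shmaddr
 * from the calling process.
 */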
int
sys_shmdt(struct shmdt_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	long i;
	int error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	}
	if (i == shminfo.shmseg)
		error = EINVAL;
	else
		error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done:
	rel_mplock();

	return (error);
}

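/*
 * shmat(2): map the segment identified by uap->shmid into the calling
 * process at uap->shmaddr (or at a kernel-chosen address), honoring
 * SHM_RDONLY and SHM_RND, and record the attachment.
 */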
int
sys_shmat(struct shmat_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	vm_size_t align;
	int error, flags, rv;
	long i;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
again:
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		if (p->p_vmspace->vm_shm != NULL) {
			kfree(shmmap_s, M_SHM);
			goto again;
		}
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}
	error = ipcperm(p, &shmseg->shm_perm,
			(uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done;
	}
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND) {
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)uap->shmaddr;
		} else {
			error = EINVAL;
			goto done;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to put it.
		 */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr +
				       maxtsiz + maxdsiz);
	}

	/*
	 * Handle alignment.  For large memory maps it is possible
	 * that the MMU can optimize the page table so align anything
	 * that is a multiple of SEG_SIZE to SEG_SIZE.
	 */
	if ((flags & MAP_FIXED) == 0 && (size & SEG_MASK) == 0)
		align = SEG_SIZE;
	else
		align = PAGE_SIZE;

	shm_handle = shmseg->shm_internal;
	vm_object_hold(shm_handle->shm_object);
	vm_object_chain_wait(shm_handle->shm_object, 0);
	vm_object_reference_locked(shm_handle->shm_object);
	/*
	 * XXX the offset/address/size/align/maptype arguments were not
	 * part of this excerpt and are reconstructed from typical
	 * vm_map_find() usage; verify against the tree.
	 */
	rv = vm_map_find(&p->p_vmspace->vm_map,
			 shm_handle->shm_object, NULL,
			 0, &attach_va, size,
			 align,
			 ((flags & MAP_FIXED) ? 0 : 1),
			 VM_MAPTYPE_NORMAL,
			 prot, prot, 0);
	vm_object_drop(shm_handle->shm_object);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shm_handle->shm_object);
		error = ENOMEM;
		goto done;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		       attach_va, attach_va + size, VM_INHERIT_SHARE);

	KKASSERT(shmmap_s->shmid == -1);
	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	uap->sysmsg_resultp = (void *)attach_va;
	error = 0;
done:
	rel_mplock();

	return (error);
}

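/*
 * shmctl(2): IPC_STAT copies the segment descriptor out, IPC_SET updates
 * ownership and mode bits, and IPC_RMID marks the segment removed,
 * freeing it once the last attachment goes away.
 */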
int
sys_shmctl(struct shmctl_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;
	int error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}

	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			break;
		error = copyout(shmseg, uap->buf, sizeof(inbuf));
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error)
			break;
		error = copyin(uap->buf, &inbuf, sizeof(inbuf));
		if (error)
			break;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error)
			break;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
	default:
		error = EINVAL;
		break;
	}
done:
	rel_mplock();

	return (error);
}

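/*
 * shmget(2) helper: the key already maps to segment 'segnum'.  Wait (and
 * have the caller retry) if the segment is still being created, honor
 * IPC_CREAT|IPC_EXCL, check permissions and size, and return the
 * existing identifier.
 */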
static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
	int i, segnum, shmid;
	size_t size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		shmrealloc();	/* maybe expand the shmsegs[] array */
		for (i = 0; i < shmalloced; i++) {
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		}
		if (i == shmalloced)
			return ENOSPC;
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to use it.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		    phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		    swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	}
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;

	/*
	 * If a physical mapping is desired and we have a ton of free pages
	 * we pre-allocate the pages here in order to avoid on-the-fly
	 * allocation later.  This has a big effect on database warm-up
	 * times since DFly supports concurrent page faults coming from the
	 * same VM object for pages which already exist.
	 *
	 * This can hang the kernel for a while so only do it if shm_use_phys
	 * is set to 2 or higher.
	 */
	if (shm_use_phys > 1) {
		vm_pindex_t pi, pmax;
		vm_page_t m;

		pmax = round_page(shmseg->shm_segsz) >> PAGE_SHIFT;
		vm_object_hold(shm_handle->shm_object);
		if (pmax > vmstats.v_free_count)
			pmax = vmstats.v_free_count;
		for (pi = 0; pi < pmax; ++pi) {
			/*
			 * XXX the tail of this loop was not part of this
			 * excerpt; the allocation flags and page release
			 * below are a plausible reconstruction.
			 */
			m = vm_page_grab(shm_handle->shm_object, pi,
					 VM_ALLOC_SYSTEM | VM_ALLOC_NULL_OK |
					 VM_ALLOC_ZERO);
			if (m == NULL)
				break;
			vm_pager_get_page(shm_handle->shm_object, &m, 1);
			vm_page_wakeup(m);
		}
		vm_object_drop(shm_handle->shm_object);
	}

	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	uap->sysmsg_result = shmid;
	return 0;
}

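/*
 * shmget(2): translate a key into a segment identifier, creating a new
 * segment when IPC_CREAT is given (or when the key is IPC_PRIVATE).
 */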
int
sys_shmget(struct shmget_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int segnum, mode, error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	mode = uap->shmflg & ACCESSPERMS;
	get_mplock();

	if (uap->key != IPC_PRIVATE) {
again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done;
		}
	}
	error = shmget_allocate_segment(p, uap, mode);
done:
	rel_mplock();

	return (error);
}

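/*
 * Called at fork time: give the child a copy of the parent's attach
 * table and bump the attach count on every mapped segment.
 */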
void
shmfork(struct proc *p1, struct proc *p2)
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	get_mplock();
	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
	}
	rel_mplock();
}

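/*
 * Called when a vmspace is torn down: detach every remaining segment
 * and free the per-process attach table.
 */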
void
shmexit(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		get_mplock();
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		kfree(base, M_SHM);
		rel_mplock();
	}
}

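/*
 * Grow shmsegs[] up to the current kern.ipc.shmmni limit, preserving
 * existing entries and marking the new slots free.
 */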
static void
shmrealloc(void)
{
	struct shmid_ds *newsegs;
	int i;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		/* mark the newly added slots free */
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	kfree(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

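/*
 * Bootstrap the SysV shared memory subsystem: size the limits that were
 * not set by tunables and allocate the initial shmsegs[] array.
 */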
static void
shminit(void *dummy)
{
	int i;

	/*
	 * If not overridden by a tunable set the maximum shm to
	 * 2/3 of main memory.
	 */
	if (shminfo.shmall == 0)
		shminfo.shmall = (size_t)vmstats.v_page_count * 2 / 3;

	shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
	shmalloced = shminfo.shmmni;
	shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}

SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);