/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");
static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap,
                                    int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap,
                            int mode, int segnum);
#define SHMSEG_FREE             0x0200
#define SHMSEG_REMOVED          0x0400
#define SHMSEG_ALLOCATED        0x0800
#define SHMSEG_WANTED           0x1000
static int shm_last_free, shm_committed, shmalloced;
static int shm_nused;
static struct shmid_ds *shmsegs;
static struct lwkt_token shm_token = LWKT_TOKEN_INITIALIZER(shm_token);
struct shm_handle {
        /* vm_offset_t kva; */
        vm_object_t shm_object;
};

struct shmmap_state {
        vm_offset_t va;
        int shmid;
};
static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);
struct shminfo shminfo = {
        0,              /* shmmax; set at boot in shminit() */
        SHMMIN,
        SHMMNI,
        SHMSEG,
        0               /* shmall; set at boot in shminit() */
};
/*
 * allow-removed    Allow a shared memory segment to be attached by its shmid
 *                  even after it has been deleted, as long as it is still
 *                  being referenced by someone.  This is a trick used by
 *                  chrome and other applications to avoid leaving shm
 *                  segments hanging around after the application is killed
 *                  or seg-faults unexpectedly.
 *
 * use-phys         Shared memory segments are to use physical memory by
 *                  default, which allows the kernel to optimize (remove)
 *                  pv_entry management structures for the related PTEs and
 *                  prevents paging.  This has distinctly different and
 *                  usually desirable characteristics versus mmap()ing
 *                  anonymous memory.
 */
static int shm_allow_removed = 1;
static int shm_use_phys = 1;
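
/*
 * Rough userland sketch of the allow-removed trick described above
 * (illustrative only, error checking omitted): the creator marks the
 * segment for removal right after attaching, so the kernel reclaims it
 * automatically when the last attachment goes away, even if the process
 * crashes:
 *
 *      int id = shmget(IPC_PRIVATE, len, IPC_CREAT | 0600);
 *      void *p = shmat(id, NULL, 0);
 *      shmctl(id, IPC_RMID, NULL);     -- segment is now SHMSEG_REMOVED
 *      ... keep using p; other processes can still shmat(id, ...) while
 *      kern.ipc.shm_allow_removed is enabled and attachments remain ...
 */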
TUNABLE_LONG("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_LONG("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_LONG("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_LONG("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
    "Max shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
    "Min shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0,
    "Max number of shared memory identifiers");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0,
    "Max shared memory segments per process");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
    "Max pages of shared memory");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0,
    "Use phys pager allocation instead of swap pager allocation");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0,
    "Enable/Disable attachment to attached segments marked for removal");
static int
shm_find_segment_by_key(key_t key)
{
        int i;

        for (i = 0; i < shmalloced; i++) {
                if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[i].shm_perm.key == key)
                        return i;
        }
        return -1;
}
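
/*
 * A shmid returned to userland packs the shmsegs[] index together with a
 * per-slot sequence number (IPCID_TO_IX/IPCID_TO_SEQ/IXSEQ_TO_IPCID).  The
 * sequence number is bumped whenever a slot is reused in
 * shmget_allocate_segment(), which lets the lookup below reject stale ids
 * that refer to a recycled slot.
 */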
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
        int segnum;
        struct shmid_ds *shmseg;

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shmalloced)
                return NULL;
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
            (!shm_allow_removed &&
             (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
            shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid)) {
                return NULL;
        }
        return shmseg;
}
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
        struct shm_handle *shm_handle;
        size_t size;

        shm_handle = shmseg->shm_internal;
        vm_object_deallocate(shm_handle->shm_object);
        kfree((caddr_t)shm_handle, M_SHM);
        shmseg->shm_internal = NULL;
        size = round_page(shmseg->shm_segsz);
        shm_committed -= btoc(size);
        shm_nused--;
        shmseg->shm_perm.mode = SHMSEG_FREE;
}
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
        struct shmid_ds *shmseg;
        int segnum, result;
        size_t size;

        segnum = IPCID_TO_IX(shmmap_s->shmid);
        shmseg = &shmsegs[segnum];
        size = round_page(shmseg->shm_segsz);
        result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
        if (result != KERN_SUCCESS)
                return EINVAL;
        shmmap_s->shmid = -1;
        shmseg->shm_dtime = time_second;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
        return 0;
}
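
/*
 * Per-process bookkeeping: p->p_vmspace->vm_shm points at an array of
 * shminfo.shmseg struct shmmap_state entries, one per possible attachment
 * (shmid == -1 marks a free slot).  The array is allocated lazily in
 * sys_shmat(), copied on fork in shmfork(), and torn down in shmexit().
 */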
int
sys_shmdt(struct shmdt_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct shmmap_state *shmmap_s;
        long i;
        int error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        lwkt_gettoken(&shm_token);
        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                error = EINVAL;
                goto done;
        }
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
                if (shmmap_s->shmid != -1 &&
                    shmmap_s->va == (vm_offset_t)uap->shmaddr)
                        break;
        }
        if (i == shminfo.shmseg) {
                error = EINVAL;
                goto done;
        }
        error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done:
        lwkt_reltoken(&shm_token);

        return (error);
}
int
sys_shmat(struct shmat_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int error, flags;
        long i;
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s = NULL;
        struct shm_handle *shm_handle;
        vm_offset_t attach_va;
        vm_prot_t prot;
        vm_size_t size;
        vm_size_t align;
        int rv;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);
        lwkt_gettoken(&shm_token);
again:
        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                size = shminfo.shmseg * sizeof(struct shmmap_state);
                shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
                for (i = 0; i < shminfo.shmseg; i++)
                        shmmap_s[i].shmid = -1;
                if (p->p_vmspace->vm_shm != NULL) {
                        kfree(shmmap_s, M_SHM);
                        goto again;
                }
                p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        }
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done;
        }
        error = ipcperm(p, &shmseg->shm_perm,
                        (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
        if (error)
                goto done;
        for (i = 0; i < shminfo.shmseg; i++) {
                if (shmmap_s->shmid == -1)
                        break;
                shmmap_s++;
        }
        if (i >= shminfo.shmseg) {
                error = EMFILE;
                goto done;
        }
        size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
        prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
        prot = VM_PROT_READ;
#endif
        if ((uap->shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON | MAP_SHARED;
        if (uap->shmaddr) {
                flags |= MAP_FIXED;
                if (uap->shmflg & SHM_RND) {
                        attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA - 1);
                } else if (((vm_offset_t)uap->shmaddr & (SHMLBA - 1)) == 0) {
                        attach_va = (vm_offset_t)uap->shmaddr;
                } else {
                        error = EINVAL;
                        goto done;
                }
        } else {
                /*
                 * This is just a hint to vm_map_find() about where to put it.
                 */
                attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr +
                                       maxtsiz + maxdsiz);
        }

        /*
         * Handle alignment.  For large memory maps it is possible
         * that the MMU can optimize the page table so align anything
         * that is a multiple of SEG_SIZE to SEG_SIZE.
         */
        if ((flags & MAP_FIXED) == 0 && (size & SEG_MASK) == 0)
                align = SEG_SIZE;
        else
                align = PAGE_SIZE;
        shm_handle = shmseg->shm_internal;
        vm_object_hold(shm_handle->shm_object);
        vm_object_chain_wait(shm_handle->shm_object, 0);
        vm_object_reference_locked(shm_handle->shm_object);
        rv = vm_map_find(&p->p_vmspace->vm_map,
                         shm_handle->shm_object, NULL,
                         0, &attach_va, size,
                         align,
                         ((flags & MAP_FIXED) ? 0 : 1),
                         VM_MAPTYPE_NORMAL, VM_SUBSYS_SHMEM,
                         prot, prot, 0);
        vm_object_drop(shm_handle->shm_object);
        if (rv != KERN_SUCCESS) {
                vm_object_deallocate(shm_handle->shm_object);
                error = ENOMEM;
                goto done;
        }
        vm_map_inherit(&p->p_vmspace->vm_map,
                       attach_va, attach_va + size, VM_INHERIT_SHARE);

        KKASSERT(shmmap_s->shmid == -1);
        shmmap_s->va = attach_va;
        shmmap_s->shmid = uap->shmid;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_atime = time_second;
        shmseg->shm_nattch++;
        uap->sysmsg_resultp = (void *)attach_va;
        error = 0;
done:
        lwkt_reltoken(&shm_token);

        return error;
}
int
sys_shmctl(struct shmctl_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int error;
        struct shmid_ds inbuf;
        struct shmid_ds *shmseg;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        lwkt_gettoken(&shm_token);
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done;
        }

        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(p, &shmseg->shm_perm, IPC_R);
                if (error == 0)
                        error = copyout(shmseg, uap->buf, sizeof(inbuf));
                break;
        case IPC_SET:
                error = ipcperm(p, &shmseg->shm_perm, IPC_M);
                if (error == 0)
                        error = copyin(uap->buf, &inbuf, sizeof(inbuf));
                if (error == 0) {
                        shmseg->shm_perm.uid = inbuf.shm_perm.uid;
                        shmseg->shm_perm.gid = inbuf.shm_perm.gid;
                        shmseg->shm_perm.mode =
                            (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                            (inbuf.shm_perm.mode & ACCESSPERMS);
                        shmseg->shm_ctime = time_second;
                }
                break;
        case IPC_RMID:
                error = ipcperm(p, &shmseg->shm_perm, IPC_M);
                if (error == 0) {
                        shmseg->shm_perm.key = IPC_PRIVATE;
                        shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                        if (shmseg->shm_nattch <= 0) {
                                shm_deallocate_segment(shmseg);
                                shm_last_free = IPCID_TO_IX(uap->shmid);
                        }
                }
                break;
        default:
                error = EINVAL;
                break;
        }
done:
        lwkt_reltoken(&shm_token);

        return error;
}
static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
        struct shmid_ds *shmseg;
        int error;

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
                if (error)
                        return error;
                return EAGAIN;
        }
        if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
                return EEXIST;
        error = ipcperm(p, &shmseg->shm_perm, mode);
        if (error)
                return error;
        if (uap->size && uap->size > shmseg->shm_segsz)
                return EINVAL;
        uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        return 0;
}
static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
        int i, segnum, shmid;
        size_t size;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shm_handle *shm_handle;

        if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
                return EINVAL;
        if (shm_nused >= shminfo.shmmni) /* any shmids left? */
                return ENOSPC;
        size = round_page(uap->size);
        if (shm_committed + btoc(size) > shminfo.shmall)
                return ENOMEM;
        if (shm_last_free < 0) {
                shmrealloc();   /* maybe expand the shmsegs[] array */
                for (i = 0; i < shmalloced; i++) {
                        if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                                break;
                }
                if (i == shmalloced)
                        return ENOSPC;
                segnum = i;
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];

        /*
         * In case we sleep in malloc(), mark the segment present but deleted
         * so that no one else tries to create the same key.
         */
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->shm_perm.key = uap->key;
        shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
        shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
        shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

        /*
         * We make sure that we have allocated a pager before we need to.
         */
        if (shm_use_phys) {
                shm_handle->shm_object =
                    phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
        } else {
                shm_handle->shm_object =
                    swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
        }
        vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
        vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
        shmseg->shm_internal = shm_handle;
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->shm_segsz = uap->size;
        shmseg->shm_cpid = p->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time_second;
        shm_committed += btoc(size);
        shm_nused++;
        /*
         * If a physical mapping is desired and we have a ton of free pages
         * we pre-allocate the pages here in order to avoid on-the-fly
         * allocation later.  This has a big effect on database warm-up
         * times since DFly supports concurrent page faults coming from the
         * same VM object for pages which already exist.
         *
         * This can hang the kernel for a while so only do it if shm_use_phys
         * is set to 2 or higher.
         */
        if (shm_use_phys > 1) {
                vm_pindex_t pi, pmax;
                vm_page_t m;

                pmax = round_page(shmseg->shm_segsz) >> PAGE_SHIFT;
                vm_object_hold(shm_handle->shm_object);
                if (pmax > vmstats.v_free_count)
                        pmax = vmstats.v_free_count;
                for (pi = 0; pi < pmax; ++pi) {
                        m = vm_page_grab(shm_handle->shm_object, pi,
                                         VM_ALLOC_SYSTEM | VM_ALLOC_NULL_OK |
                                         VM_ALLOC_ZERO);
                        if (m == NULL)
                                break;
                        vm_pager_get_page(shm_handle->shm_object, &m, 1);
                        vm_page_wakeup(m);
                        lwkt_yield();
                }
                vm_object_drop(shm_handle->shm_object);
        }
        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup((caddr_t)shmseg);
        }
        uap->sysmsg_result = shmid;
        return 0;
}
int
sys_shmget(struct shmget_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int segnum, mode, error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        mode = uap->shmflg & ACCESSPERMS;

        lwkt_gettoken(&shm_token);

        if (uap->key != IPC_PRIVATE) {
again:
                segnum = shm_find_segment_by_key(uap->key);
                if (segnum >= 0) {
                        error = shmget_existing(p, uap, mode, segnum);
                        if (error == EAGAIN)
                                goto again;
                        goto done;
                }
                if ((uap->shmflg & IPC_CREAT) == 0) {
                        error = ENOENT;
                        goto done;
                }
        }
        error = shmget_allocate_segment(p, uap, mode);
done:
        lwkt_reltoken(&shm_token);

        return (error);
}
void
shmfork(struct proc *p1, struct proc *p2)
{
        struct shmmap_state *shmmap_s;
        size_t size;
        int i;

        lwkt_gettoken(&shm_token);
        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
        bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
        p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
                if (shmmap_s->shmid != -1)
                        shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
        }
        lwkt_reltoken(&shm_token);
}
void
shmexit(struct vmspace *vm)
{
        struct shmmap_state *base, *shm;
        int i;

        if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
                vm->vm_shm = NULL;
                lwkt_gettoken(&shm_token);
                for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
                        if (shm->shmid != -1)
                                shm_delete_mapping(vm, shm);
                }
                kfree(base, M_SHM);
                lwkt_reltoken(&shm_token);
        }
}
static void
shmrealloc(void)
{
        int i;
        struct shmid_ds *newsegs;

        if (shmalloced >= shminfo.shmmni)
                return;

        newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
        for (i = 0; i < shmalloced; i++)
                bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
        for (; i < shminfo.shmmni; i++) {
                newsegs[i].shm_perm.mode = SHMSEG_FREE;
                newsegs[i].shm_perm.seq = 0;
        }
        kfree(shmsegs, M_SHM);
        shmsegs = newsegs;
        shmalloced = shminfo.shmmni;
}
static void
shminit(void *dummy)
{
        int i;

        /*
         * If not overridden by a tunable, set the maximum shm to
         * 2/3 of main memory.
         */
        if (shminfo.shmall == 0)
                shminfo.shmall = (size_t)vmstats.v_page_count * 2 / 3;

        shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
        shmalloced = shminfo.shmmni;
        shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
        for (i = 0; i < shmalloced; i++) {
                shmsegs[i].shm_perm.mode = SHMSEG_FREE;
                shmsegs[i].shm_perm.seq = 0;
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);