/* $FreeBSD: src/sys/kern/sysv_shm.c,v 1.45.2.6 2002/10/22 20:45:03 fjoe Exp $ */
/* $DragonFly: src/sys/kern/sysv_shm.c,v 1.21 2008/01/06 16:55:51 swildner Exp $ */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int sys_oshmctl (struct proc *p, struct oshmctl_args *uap);
static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap, int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap, int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)sys_shmat, (sy_call_t *)sys_oshmctl,
	(sy_call_t *)sys_shmdt, (sy_call_t *)sys_shmget,
	(sy_call_t *)sys_shmctl
};

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds	*shmsegs;

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);

#ifndef SHMMAXPGS
#define	SHMMAXPGS	8192	/* note: sysv shared memory is swap backed */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	192
#endif
#ifndef SHMSEG
#define	SHMSEG	128
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)
#endif

struct	shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

static int shm_use_phys;

TUNABLE_INT("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_INT("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_INT("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_INT("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0, "");
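
/*
 * Locate a segment by key.  Returns the index into shmsegs[] of an
 * allocated segment with a matching key, or -1 if none exists.
 */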
static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shmalloced; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}
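
/*
 * Locate a segment by shmid.  The id encodes an index into shmsegs[]
 * plus a sequence number; both must match a live, non-removed segment
 * or NULL is returned.
 */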
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}
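
/*
 * Release the VM object backing a segment, return its pages to the
 * committed-memory pool, and mark the slot free for reuse.
 */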
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	kfree((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}
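
/*
 * Remove one attachment from an address space.  When the last attach
 * of a segment already marked SHMSEG_REMOVED goes away, the segment
 * itself is deallocated.
 */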
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}
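
/*
 * shmdt(2): detach the segment mapped at uap->shmaddr, found by a
 * linear search of the per-process shmmap_state array.
 */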
int
sys_shmdt(struct shmdt_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	int i, error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	}
	if (i == shminfo.shmseg)
		error = EINVAL;
	else
		error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done:
	rel_mplock();
	return (error);
}
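
/*
 * shmat(2): attach a segment.  The per-process attach table is
 * allocated lazily on first use; the segment's VM object is then
 * mapped shared (and inherited across fork) at the requested or a
 * kernel-chosen address.
 */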
int
sys_shmat(struct shmat_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error, i, flags;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
again:
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		if (p->p_vmspace->vm_shm != NULL) {
			kfree(shmmap_s, M_SHM);
			goto again;
		}
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}
	error = ipcperm(p, &shmseg->shm_perm,
			(uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done;
	}
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND) {
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)uap->shmaddr;
		} else {
			error = EINVAL;
			goto done;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to put it.
		 */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr +
				       maxtsiz + maxdsiz);
	}

	shm_handle = shmseg->shm_internal;
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map,
			 shm_handle->shm_object, 0,
			 &attach_va, size,
			 ((flags & MAP_FIXED) ? 0 : 1),
			 VM_MAPTYPE_NORMAL,
			 prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shm_handle->shm_object);
		error = ENOMEM;
		goto done;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		       attach_va, attach_va + size, VM_INHERIT_SHARE);

	KKASSERT(shmmap_s->shmid == -1);
	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	uap->sysmsg_result = attach_va;
	error = 0;
done:
	rel_mplock();
	return (error);
}

struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	struct sysmsg sysmsg;
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};
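
/*
 * Old (4.3BSD-era) shmctl(2).  IPC_STAT fills in the old-style
 * oshmid_ds; all other commands are forwarded to sys_shmctl().
 */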
static int
sys_oshmctl(struct proc *p, struct oshmctl_args *uap)
{
	struct thread *td = curthread;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;
	int error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			break;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		error = sys_shmctl((struct shmctl_args *)uap);
		break;
	}
done:
	rel_mplock();
	return (error);
}
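
/*
 * shmctl(2): IPC_STAT copies the shmid_ds out to userland, IPC_SET
 * updates ownership and permission bits, and IPC_RMID marks the
 * segment for destruction once its attach count reaches zero.
 */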
int
sys_shmctl(struct shmctl_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error == 0)
			error = copyout(shmseg, uap->buf, sizeof(inbuf));
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0)
			error = copyin(uap->buf, &inbuf, sizeof(inbuf));
		if (error == 0) {
			shmseg->shm_perm.uid = inbuf.shm_perm.uid;
			shmseg->shm_perm.gid = inbuf.shm_perm.gid;
			shmseg->shm_perm.mode =
			    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
			    (inbuf.shm_perm.mode & ACCESSPERMS);
			shmseg->shm_ctime = time_second;
		}
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0) {
			shmseg->shm_perm.key = IPC_PRIVATE;
			shmseg->shm_perm.mode |= SHMSEG_REMOVED;
			if (shmseg->shm_nattch <= 0) {
				shm_deallocate_segment(shmseg);
				shm_last_free = IPCID_TO_IX(uap->shmid);
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
done:
	rel_mplock();
	return (error);
}
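
/*
 * shmget(2) helper for an existing key: wait out a segment still being
 * set up, then enforce IPC_CREAT|IPC_EXCL, permissions, and size.
 */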
static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}
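
/*
 * shmget(2) helper that creates a new segment: find a free slot
 * (growing shmsegs[] if necessary), charge the pages against shmall,
 * and back the segment with a phys- or swap-pager object depending on
 * the kern.ipc.shm_use_phys tunable.
 */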
static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		shmrealloc();	/* maybe expand the shmsegs[] array */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return ENOSPC;
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_PHYS, NULL, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_SWAP, NULL, size, VM_PROT_DEFAULT, 0);
	}
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	uap->sysmsg_result = shmid;
	return 0;
}
int
sys_shmget(struct shmget_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int segnum, mode, error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	mode = uap->shmflg & ACCESSPERMS;
	get_mplock();

	if (uap->key != IPC_PRIVATE) {
again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done;
		}
	}
	error = shmget_allocate_segment(p, uap, mode);
done:
	rel_mplock();
	return (error);
}
/*
 * shmsys_args(int which, int a2, ...) (VARARGS)
 */
int
sys_shmsys(struct shmsys_args *uap)
{
	struct thread *td = curthread;
	unsigned int which = (unsigned int)uap->which;
	int error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	if (which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	get_mplock();
	bcopy(&uap->a2, &uap->which,
	      sizeof(struct shmsys_args) - offsetof(struct shmsys_args, a2));
	error = ((*shmcalls[which])(uap));
	rel_mplock();
	return (error);
}
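
/*
 * Called at fork: give the child a copy of the parent's attach table
 * and bump the attach count of every segment referenced by it.
 */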
void
shmfork(struct proc *p1, struct proc *p2)
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}
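
/*
 * Called when an address space is torn down: detach everything that
 * is still attached.
 */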
void
shmexit(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		kfree(base, M_SHM);
	}
}
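
/*
 * Grow shmsegs[] up to shminfo.shmmni entries when no free slot
 * remains, copying the live entries and marking the new ones free.
 */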
static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	kfree(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}
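
/*
 * SYSINIT hook: derive shmmax from shmall, allocate the initial
 * shmsegs[] array, and mark every slot free.
 */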
static void
shminit(void *dummy)
{
	int i;

	shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
	shmalloced = shminfo.shmmni;
	shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}
, SI_SUB_SYSV_SHM
, SI_ORDER_FIRST
, shminit
, NULL
);