/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31 #include "opt_compat.h"
32 #include "opt_sysvipc.h"
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/sysproto.h>
37 #include <sys/kernel.h>
38 #include <sys/sysctl.h>
41 #include <sys/malloc.h>
44 #include <sys/sysent.h>
47 #include <sys/mplock2.h>
50 #include <vm/vm_param.h>
53 #include <vm/vm_object.h>
54 #include <vm/vm_map.h>
55 #include <vm/vm_page.h>
56 #include <vm/vm_pager.h>
static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

static int sys_oshmctl (struct proc *p, struct oshmctl_args *uap);
static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap,
                                    int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap,
                            int mode, int segnum);
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
        (sy_call_t *)sys_shmat, (sy_call_t *)sys_oshmctl,
        (sy_call_t *)sys_shmdt, (sy_call_t *)sys_shmget,
        (sy_call_t *)sys_shmctl
};
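/*
 * Note: the table order above matches the `which' codes accepted by
 * sys_shmsys() below, so (illustratively) shmsys(0, ...) dispatches to
 * sys_shmat() and shmsys(4, ...) to sys_shmctl().
 */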
#define SHMSEG_FREE             0x0200
#define SHMSEG_REMOVED          0x0400
#define SHMSEG_ALLOCATED        0x0800
#define SHMSEG_WANTED           0x1000
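/*
 * These state bits live in shm_perm.mode alongside the segment's access
 * permissions.  The permission bits occupy the low 9 bits (ACCESSPERMS ==
 * 0777 == 0x01ff), so the state flags above, starting at 0x0200, never
 * collide with them; see the (mode & ACCESSPERMS) masking in sys_shmctl()
 * and shmget_allocate_segment() below.
 */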
static int shm_last_free, shm_committed, shmalloced;
int shm_nused;
static struct shmid_ds *shmsegs;

struct shm_handle {
        /* vm_offset_t kva; */
        vm_object_t shm_object;
};

struct shmmap_state {
        vm_offset_t va;
        int shmid;
};
static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);
struct shminfo shminfo = {
        0,              /* shmmax */
        SHMMIN,         /* shmmin */
        SHMMNI,         /* shmmni */
        SHMSEG,         /* shmseg */
        0               /* shmall */
};

static int shm_use_phys;
TUNABLE_LONG("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_LONG("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_LONG("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_LONG("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
    "Max shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
    "Min shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0,
    "Max number of shared memory identifiers");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0,
    "Max shared memory segments per process");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
    "Max pages of shared memory");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0,
    "Use phys pager allocation instead of swap pager allocation");
static int
shm_find_segment_by_key(key_t key)
{
        int i;

        for (i = 0; i < shmalloced; i++) {
                if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[i].shm_perm.key == key)
                        return i;
        }
        return -1;
}
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
        int segnum;
        struct shmid_ds *shmseg;

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shmalloced)
                return NULL;
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
            != SHMSEG_ALLOCATED ||
            shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid)) {
                return NULL;
        }
        return shmseg;
}
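/*
 * For reference: a shmid packs both the shmsegs[] slot and a generation
 * sequence, roughly (see <sys/ipc.h>):
 *
 *      IPCID_TO_IX(id)  == id & 0xffff          (array index)
 *      IPCID_TO_SEQ(id) == (id >> 16) & 0xffff  (generation)
 *
 * so the sequence check above rejects stale ids whose slot has since been
 * freed and recycled for a different segment.
 */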
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
        struct shm_handle *shm_handle;
        size_t size;

        shm_handle = shmseg->shm_internal;
        vm_object_deallocate(shm_handle->shm_object);
        kfree((caddr_t)shm_handle, M_SHM);
        shmseg->shm_internal = NULL;
        size = round_page(shmseg->shm_segsz);
        shm_committed -= btoc(size);
        shm_nused--;
        shmseg->shm_perm.mode = SHMSEG_FREE;
}
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
        struct shmid_ds *shmseg;
        int segnum, result;
        size_t size;

        segnum = IPCID_TO_IX(shmmap_s->shmid);
        shmseg = &shmsegs[segnum];
        size = round_page(shmseg->shm_segsz);
        result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
        if (result != KERN_SUCCESS)
                return EINVAL;
        shmmap_s->shmid = -1;
        shmseg->shm_dtime = time_second;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
        return 0;
}
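/*
 * Note the lazy-destruction rule visible above: a segment marked
 * SHMSEG_REMOVED by IPC_RMID lingers until its attach count drops to zero,
 * matching the SysV semantic that shmctl(id, IPC_RMID, ...) only schedules
 * destruction while attached processes keep using the memory.
 */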
/*
 * MPALMOSTSAFE
 */
int
sys_shmdt(struct shmdt_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct shmmap_state *shmmap_s;
        long i;
        int error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        get_mplock();
        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                error = EINVAL;
                goto done;
        }
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
                if (shmmap_s->shmid != -1 &&
                    shmmap_s->va == (vm_offset_t)uap->shmaddr)
                        break;
        }
        if (i == shminfo.shmseg) {
                error = EINVAL;
        } else {
                error = shm_delete_mapping(p->p_vmspace, shmmap_s);
        }
done:
        rel_mplock();
        return (error);
}
/*
 * MPALMOSTSAFE
 */
int
sys_shmat(struct shmat_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int error, flags;
        long i;
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s = NULL;
        struct shm_handle *shm_handle;
        vm_offset_t attach_va;
        vm_prot_t prot;
        vm_size_t size;
        int rv;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        get_mplock();
again:
        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                size = shminfo.shmseg * sizeof(struct shmmap_state);
                shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
                for (i = 0; i < shminfo.shmseg; i++)
                        shmmap_s[i].shmid = -1;
                if (p->p_vmspace->vm_shm != NULL) {
                        kfree(shmmap_s, M_SHM);
                        goto again;
                }
                p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        }
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done;
        }
        error = ipcperm(p, &shmseg->shm_perm,
                        (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
        if (error)
                goto done;
        for (i = 0; i < shminfo.shmseg; i++) {
                if (shmmap_s->shmid == -1)
                        break;
                shmmap_s++;
        }
        if (i >= shminfo.shmseg) {
                error = EMFILE;
                goto done;
        }
        size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
        prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
        prot = VM_PROT_READ;
#endif
        if ((uap->shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON | MAP_SHARED;
        if (uap->shmaddr) {
                flags |= MAP_FIXED;
                if (uap->shmflg & SHM_RND) {
                        attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
                } else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) {
                        attach_va = (vm_offset_t)uap->shmaddr;
                } else {
                        error = EINVAL;
                        goto done;
                }
        } else {
                /*
                 * This is just a hint to vm_map_find() about where to put it.
                 */
                attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr +
                                       maxtsiz + maxdsiz);
        }

        shm_handle = shmseg->shm_internal;
        vm_object_hold(shm_handle->shm_object);
        vm_object_chain_wait(shm_handle->shm_object);
        vm_object_reference_locked(shm_handle->shm_object);
        rv = vm_map_find(&p->p_vmspace->vm_map,
                         shm_handle->shm_object, 0,
                         &attach_va, size,
                         ((flags & MAP_FIXED) ? 0 : 1),
                         VM_MAPTYPE_NORMAL,
                         prot, prot,
                         0);
        vm_object_drop(shm_handle->shm_object);
        if (rv != KERN_SUCCESS) {
                vm_object_deallocate(shm_handle->shm_object);
                error = ENOMEM;
                goto done;
        }
        vm_map_inherit(&p->p_vmspace->vm_map,
                       attach_va, attach_va + size, VM_INHERIT_SHARE);

        KKASSERT(shmmap_s->shmid == -1);
        shmmap_s->va = attach_va;
        shmmap_s->shmid = uap->shmid;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_atime = time_second;
        shmseg->shm_nattch++;
        uap->sysmsg_resultp = (void *)attach_va;
        error = 0;
done:
        rel_mplock();
        return (error);
}
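/*
 * Illustrative userland sequence (not part of this file) tying the
 * attach/detach paths above together; error handling omitted:
 *
 *      int id = shmget(IPC_PRIVATE, 65536, IPC_CREAT | 0600);
 *      char *base = shmat(id, NULL, 0);     kernel chooses attach_va
 *      base[0] = 1;                         touch the shared pages
 *      shmdt(base);
 *      shmctl(id, IPC_RMID, NULL);          destroyed at last detach
 */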
struct oshmid_ds {
        struct ipc_perm shm_perm;       /* operation perms */
        int shm_segsz;                  /* size of segment (bytes) */
        ushort shm_cpid;                /* pid, creator */
        ushort shm_lpid;                /* pid, last operation */
        short shm_nattch;               /* no. of current attaches */
        time_t shm_atime;               /* last attach time */
        time_t shm_dtime;               /* last detach time */
        time_t shm_ctime;               /* last change time */
        void *shm_handle;               /* internal handle for shm segment */
};

struct oshmctl_args {
        struct sysmsg sysmsg;
        int shmid;
        int cmd;
        struct oshmid_ds *ubuf;
};
static int
sys_oshmctl(struct proc *p, struct oshmctl_args *uap)
{
#ifdef COMPAT_43
        struct thread *td = curthread;
        struct shmid_ds *shmseg;
        struct oshmid_ds outbuf;
        int error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        get_mplock();
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done;
        }

        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(p, &shmseg->shm_perm, IPC_R);
                if (error)
                        break;
                outbuf.shm_perm = shmseg->shm_perm;
                outbuf.shm_segsz = shmseg->shm_segsz;
                outbuf.shm_cpid = shmseg->shm_cpid;
                outbuf.shm_lpid = shmseg->shm_lpid;
                outbuf.shm_nattch = shmseg->shm_nattch;
                outbuf.shm_atime = shmseg->shm_atime;
                outbuf.shm_dtime = shmseg->shm_dtime;
                outbuf.shm_ctime = shmseg->shm_ctime;
                outbuf.shm_handle = shmseg->shm_internal;
                error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
                break;
        default:
                /* XXX casting to (sy_call_t *) is bogus, as usual. */
                error = sys_shmctl((struct shmctl_args *)uap);
        }
done:
        rel_mplock();
        return (error);
#else
        return EINVAL;
#endif
}
/*
 * MPALMOSTSAFE
 */
int
sys_shmctl(struct shmctl_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int error;
        struct shmid_ds inbuf;
        struct shmid_ds *shmseg;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        get_mplock();
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done;
        }

        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(p, &shmseg->shm_perm, IPC_R);
                if (error == 0)
                        error = copyout(shmseg, uap->buf, sizeof(inbuf));
                break;
        case IPC_SET:
                error = ipcperm(p, &shmseg->shm_perm, IPC_M);
                if (error == 0)
                        error = copyin(uap->buf, &inbuf, sizeof(inbuf));
                if (error == 0) {
                        shmseg->shm_perm.uid = inbuf.shm_perm.uid;
                        shmseg->shm_perm.gid = inbuf.shm_perm.gid;
                        shmseg->shm_perm.mode =
                            (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                            (inbuf.shm_perm.mode & ACCESSPERMS);
                        shmseg->shm_ctime = time_second;
                }
                break;
        case IPC_RMID:
                error = ipcperm(p, &shmseg->shm_perm, IPC_M);
                if (error == 0) {
                        shmseg->shm_perm.key = IPC_PRIVATE;
                        shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                        if (shmseg->shm_nattch <= 0) {
                                shm_deallocate_segment(shmseg);
                                shm_last_free = IPCID_TO_IX(uap->shmid);
                        }
                }
                break;
        default:
                error = EINVAL;
                break;
        }
done:
        rel_mplock();
        return (error);
}
static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
        struct shmid_ds *shmseg;
        int error;

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
                if (error)
                        return error;
                return EAGAIN;
        }
        if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
                return EEXIST;
        error = ipcperm(p, &shmseg->shm_perm, mode);
        if (error)
                return error;
        if (uap->size && uap->size > shmseg->shm_segsz)
                return EINVAL;
        uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        return 0;
}
static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
        int i, segnum, shmid;
        size_t size;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shm_handle *shm_handle;

        if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
                return EINVAL;
        if (shm_nused >= shminfo.shmmni) /* any shmids left? */
                return ENOSPC;
        size = round_page(uap->size);
        if (shm_committed + btoc(size) > shminfo.shmall)
                return ENOMEM;
        if (shm_last_free < 0) {
                shmrealloc();   /* maybe expand the shmsegs[] array */
                for (i = 0; i < shmalloced; i++) {
                        if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                                break;
                }
                if (i == shmalloced)
                        return ENOSPC;
                segnum = i;
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];
        /*
         * In case we sleep in malloc(), mark the segment present but deleted
         * so that no one else tries to create the same key.
         */
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->shm_perm.key = uap->key;
        shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
        shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
        shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

        /*
         * We make sure that we have allocated a pager before we need
         * to.
         */
        if (shm_use_phys) {
                shm_handle->shm_object =
                    phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
        } else {
                shm_handle->shm_object =
                    swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
        }
        vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
        vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

        shmseg->shm_internal = shm_handle;
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->shm_segsz = uap->size;
        shmseg->shm_cpid = p->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time_second;
        shm_committed += btoc(size);
        shm_nused++;

        /*
         * If a physical mapping is desired and we have a ton of free pages
         * we pre-allocate the pages here in order to avoid on-the-fly
         * allocation later.  This has a big effect on database warm-up
         * times since DFly supports concurrent page faults coming from the
         * same VM object for pages which already exist.
         *
         * This can hang the kernel for a while so only do it if shm_use_phys
         * is set to 2 or higher.
         */
        if (shm_use_phys > 1) {
                vm_pindex_t pi, pmax;
                vm_page_t m;

                pmax = round_page(shmseg->shm_segsz) >> PAGE_SHIFT;
                vm_object_hold(shm_handle->shm_object);
                if (pmax > vmstats.v_free_count)
                        pmax = vmstats.v_free_count;
                for (pi = 0; pi < pmax; ++pi) {
                        m = vm_page_grab(shm_handle->shm_object, pi,
                                         VM_ALLOC_SYSTEM | VM_ALLOC_NULL_OK |
                                         VM_ALLOC_ZERO);
                        if (m == NULL)
                                break;
                        vm_pager_get_page(shm_handle->shm_object, &m, 1);
                        vm_page_activate(m);
                        vm_page_wakeup(m);
                        lwkt_yield();
                }
                vm_object_drop(shm_handle->shm_object);
        }

        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup((caddr_t)shmseg);
        }
        uap->sysmsg_result = shmid;
        return 0;
}
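/*
 * Illustrative only: the pre-allocation path above is opt-in, e.g.
 *
 *      sysctl kern.ipc.shm_use_phys=2
 *
 * selects the phys pager with up-front page population, while a value of 1
 * selects the phys pager with on-demand faulting.
 */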
/*
 * MPALMOSTSAFE
 */
int
sys_shmget(struct shmget_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int segnum, mode, error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        mode = uap->shmflg & ACCESSPERMS;
        get_mplock();

        if (uap->key != IPC_PRIVATE) {
        again:
                segnum = shm_find_segment_by_key(uap->key);
                if (segnum >= 0) {
                        error = shmget_existing(p, uap, mode, segnum);
                        if (error == EAGAIN)
                                goto again;
                        goto done;
                }
                if ((uap->shmflg & IPC_CREAT) == 0) {
                        error = ENOENT;
                        goto done;
                }
        }
        error = shmget_allocate_segment(p, uap, mode);
done:
        rel_mplock();
        return (error);
}
/*
 * shmsys_args(int which, int a2, ...) (VARARGS)
 *
 * MPALMOSTSAFE
 */
int
sys_shmsys(struct shmsys_args *uap)
{
        struct thread *td = curthread;
        unsigned int which = (unsigned int)uap->which;
        int error;

        if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
                return (ENOSYS);

        if (which >= NELEM(shmcalls))
                return EINVAL;
        get_mplock();
        bcopy(&uap->a2, &uap->which,
              sizeof(struct shmsys_args) - offsetof(struct shmsys_args, a2));
        error = ((*shmcalls[which])(uap));
        rel_mplock();
        return (error);
}
void
shmfork(struct proc *p1, struct proc *p2)
{
        struct shmmap_state *shmmap_s;
        size_t size;
        int i;

        get_mplock();
        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
        bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
        p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
                if (shmmap_s->shmid != -1)
                        shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
        }
        rel_mplock();
}
void
shmexit(struct vmspace *vm)
{
        struct shmmap_state *base, *shm;
        int i;

        if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
                vm->vm_shm = NULL;
                get_mplock();
                for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
                        if (shm->shmid != -1)
                                shm_delete_mapping(vm, shm);
                }
                kfree(base, M_SHM);
                rel_mplock();
        }
}
static void
shmrealloc(void)
{
        int i;
        struct shmid_ds *newsegs;

        if (shmalloced >= shminfo.shmmni)
                return;

        newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
        for (i = 0; i < shmalloced; i++)
                bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
        for (; i < shminfo.shmmni; i++) {
                /*
                 * Initialize the slots added by the expansion.  These must
                 * be written in the new array; indexing the old shmsegs[]
                 * here would run past the end of its smaller allocation.
                 */
                newsegs[i].shm_perm.mode = SHMSEG_FREE;
                newsegs[i].shm_perm.seq = 0;
        }
        kfree(shmsegs, M_SHM);
        shmsegs = newsegs;
        shmalloced = shminfo.shmmni;
}
static void
shminit(void *dummy)
{
        int i;

        /*
         * If not overridden by a tunable set the maximum shm to
         * 2/3 of main memory.
         */
        if (shminfo.shmall == 0)
                shminfo.shmall = (size_t)vmstats.v_page_count * 2 / 3;

        shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
        shmalloced = shminfo.shmmni;
        shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
        for (i = 0; i < shmalloced; i++) {
                shmsegs[i].shm_perm.mode = SHMSEG_FREE;
                shmsegs[i].shm_perm.seq = 0;
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);