/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmsg.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");
static int shmget_allocate_segment(struct proc *p, struct sysmsg *sysmsg,
			const struct shmget_args *uap, int mode);
static int shmget_existing(struct proc *p, struct sysmsg *sysmsg,
			const struct shmget_args *uap, int mode, int segnum);
#define SHMSEG_FREE		0x0200
#define SHMSEG_REMOVED		0x0400
#define SHMSEG_ALLOCATED	0x0800
#define SHMSEG_WANTED		0x1000
static int shm_last_free, shm_committed, shmalloced, shm_nused;
static struct shmid_ds *shmsegs;
static struct lwkt_token shm_token = LWKT_TOKEN_INITIALIZER(shm_token);

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
	int reserved;
};
static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);
struct shminfo shminfo = {
	0,		/* shmmax; computed from shmall in shminit() */
	SHMMIN,
	SHMMNI,
	SHMSEG,
	0		/* shmall; defaulted in shminit() if not tuned */
};
/*
 * allow-removed   Allow a shared memory segment to be attached by its shmid
 *		   even after it has been deleted, as long as it is still
 *		   being referenced by someone.  This is a trick used by
 *		   Chrome and other applications to avoid leaving shm
 *		   segments hanging around after the application is killed
 *		   or seg-faults unexpectedly.
 *
 * use-phys	   Shared memory segments are to use physical memory by
 *		   default, which may allow the kernel to better-optimize
 *		   the pmap and reduce overhead.  The pages are effectively
 *		   wired.
 */
static int shm_allow_removed = 1;
static int shm_use_phys = 1;
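
/*
 * Illustrative userland sketch of the allow-removed pattern described
 * above (standard SysV shm API, not code from this file):
 *
 *	int id = shmget(IPC_PRIVATE, len, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *	shmctl(id, IPC_RMID, NULL);	-- marks SHMSEG_REMOVED
 *
 * "p" stays valid; the segment is reclaimed on the last detach, even if
 * the process exits abnormally.
 */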
TUNABLE_LONG("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_LONG("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_LONG("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_LONG("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
    "Max shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
    "Min shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0,
    "Max number of shared memory identifiers");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0,
    "Max shared memory segments per process");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
    "Max pages of shared memory");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0,
    "Use phys pager allocation instead of swap pager allocation");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0,
    "Enable/Disable attachment to attached segments marked for removal");
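
/*
 * Illustrative examples for the knobs above (knob names taken from the
 * TUNABLE/SYSCTL declarations in this file; the values are arbitrary):
 *
 *	/boot/loader.conf:	kern.ipc.shmmaxpgs="262144"
 *	at runtime:		sysctl kern.ipc.shm_use_phys=2
 *
 * shmmni is CTLFLAG_RD and can only be seeded via its loader tunable.
 */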
static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shmalloced; i++) {
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	}
	return -1;
}
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	     (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid)) {
		return NULL;
	}
	return shmseg;
}
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	kfree((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}
int
sys_shmdt(struct sysmsg *sysmsg, const struct shmdt_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	struct prison *pr = p->p_ucred->cr_prison;
	long i;
	int error;

	if (pr && !PRISON_CAP_ISSET(pr->pr_caps, PRISON_CAP_SYS_SYSVIPC))
		return (ENOSYS);

	lwkt_gettoken(&shm_token);
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		lwkt_reltoken(&shm_token);
		return EINVAL;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	}
	if (i == shminfo.shmseg) {
		error = EINVAL;
	} else {
		error = shm_delete_mapping(p->p_vmspace, shmmap_s);
	}
	lwkt_reltoken(&shm_token);

	return error;
}
int
sys_shmat(struct sysmsg *sysmsg, const struct shmat_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct prison *pr = p->p_ucred->cr_prison;
	int error, flags;
	long i;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	vm_size_t align;
	int rv;

	if (pr && !PRISON_CAP_ISSET(pr->pr_caps, PRISON_CAP_SYS_SYSVIPC))
		return (ENOSYS);

	lwkt_gettoken(&shm_token);
again:
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++) {
			shmmap_s[i].shmid = -1;
			shmmap_s[i].reserved = 0;
		}
		if (p->p_vmspace->vm_shm != NULL) {
			kfree(shmmap_s, M_SHM);
			goto again;
		}
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}
	error = ipcperm(p, &shmseg->shm_perm,
			(uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done;
	/*
	 * Find a free element and mark it reserved.  This fixes races
	 * against concurrent allocations due to the token being
	 * interrupted by blocking operations.  The shmmap_s reservation
	 * will be cleared upon completion or error.
	 */
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1 && shmmap_s->reserved == 0) {
			shmmap_s->reserved = 1;
			break;
		}
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done;
	}
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND) {
			attach_va =
			    rounddown2((vm_offset_t)uap->shmaddr, SHMLBA);
		} else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)uap->shmaddr;
		} else {
			error = EINVAL;
			shmmap_s->reserved = 0;
			goto done;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to put it.
		 */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr +
				       maxtsiz + maxdsiz);
	}

	/*
	 * Handle alignment.  For large memory maps it is possible
	 * that the MMU can optimize the page table so align anything
	 * that is a multiple of SEG_SIZE to SEG_SIZE.
	 */
	if ((flags & MAP_FIXED) == 0 && (size & SEG_MASK) == 0)
		align = SEG_SIZE;
	else
		align = PAGE_SIZE;
	shm_handle = shmseg->shm_internal;
	vm_object_hold(shm_handle->shm_object);
	vm_object_reference_locked(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map,
			 shm_handle->shm_object, NULL,
			 0, &attach_va, size,
			 align,
			 ((flags & MAP_FIXED) ? 0 : 1),
			 VM_MAPTYPE_NORMAL, VM_SUBSYS_SHMEM,
			 prot, prot, 0);
	vm_object_drop(shm_handle->shm_object);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shm_handle->shm_object);
		shmmap_s->reserved = 0;
		error = ENOMEM;
		goto done;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		       attach_va, attach_va + size, VM_INHERIT_SHARE);
	KKASSERT(shmmap_s->shmid == -1);
	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmmap_s->reserved = 0;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	sysmsg->sysmsg_resultp = (void *)attach_va;
	error = 0;
done:
	lwkt_reltoken(&shm_token);

	return error;
}
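
/*
 * Illustrative userland sketch of the attach path above (standard SysV
 * shm API, not code from this file).  With SHM_RND the kernel rounds the
 * supplied address down to SHMLBA; otherwise it must be SHMLBA-aligned:
 *
 *	void *p = shmat(id, hint, SHM_RND | SHM_RDONLY);
 *	...
 *	shmdt(p);
 */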
int
sys_shmctl(struct sysmsg *sysmsg, const struct shmctl_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct prison *pr = p->p_ucred->cr_prison;
	int error;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	if (pr && !PRISON_CAP_ISSET(pr->pr_caps, PRISON_CAP_SYS_SYSVIPC))
		return (ENOSYS);

	lwkt_gettoken(&shm_token);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}

	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error == 0)
			error = copyout(shmseg, uap->buf, sizeof(inbuf));
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0)
			error = copyin(uap->buf, &inbuf, sizeof(inbuf));
		if (error == 0) {
			shmseg->shm_perm.uid = inbuf.shm_perm.uid;
			shmseg->shm_perm.gid = inbuf.shm_perm.gid;
			shmseg->shm_perm.mode =
			    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
			    (inbuf.shm_perm.mode & ACCESSPERMS);
			shmseg->shm_ctime = time_second;
		}
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0) {
			shmseg->shm_perm.key = IPC_PRIVATE;
			shmseg->shm_perm.mode |= SHMSEG_REMOVED;
			if (shmseg->shm_nattch <= 0) {
				shm_deallocate_segment(shmseg);
				shm_last_free = IPCID_TO_IX(uap->shmid);
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
done:
	lwkt_reltoken(&shm_token);

	return error;
}
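
/*
 * Illustrative userland sketch of the shmctl() commands above (standard
 * SysV shm API, not code from this file):
 *
 *	struct shmid_ds ds;
 *	shmctl(id, IPC_STAT, &ds);	-- copyout path
 *	ds.shm_perm.mode = 0640;
 *	shmctl(id, IPC_SET, &ds);	-- uid/gid/mode only
 *	shmctl(id, IPC_RMID, NULL);	-- deferred destroy
 */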
static int
shmget_existing(struct proc *p, struct sysmsg *sysmsg,
		const struct shmget_args *uap, int mode, int segnum)
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	sysmsg->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}
static int
shmget_allocate_segment(struct proc *p, struct sysmsg *sysmsg,
			const struct shmget_args *uap, int mode)
{
	int i, segnum, shmid;
	size_t size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni)	/* any shmids left? */
		return ENOSPC;

	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;

	if (shm_last_free < 0) {
		shmrealloc();	/* maybe expand the shmsegs[] array */
		for (i = 0; i < shmalloced; i++) {
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		}
		if (i == shmalloced)
			return ENOSPC;
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];

	/*
	 * In case we sleep in kmalloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		    phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		    swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	}
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	/*
	 * If a physical mapping is desired and we have a ton of free pages
	 * we pre-allocate the pages here in order to avoid on-the-fly
	 * allocation later.  This has a big effect on database warm-up
	 * times since DFly supports concurrent page faults coming from the
	 * same VM object for pages which already exist.
	 *
	 * This can hang the kernel for a while so only do it if shm_use_phys
	 * is set to 2 or higher.
	 */
	if (shm_use_phys > 1) {
		vm_pindex_t pi, pmax;
		vm_page_t m;

		pmax = round_page(shmseg->shm_segsz) >> PAGE_SHIFT;
		vm_object_hold(shm_handle->shm_object);
		if (pmax > vmstats.v_free_count)
			pmax = vmstats.v_free_count;
		for (pi = 0; pi < pmax; ++pi) {
			m = vm_page_grab(shm_handle->shm_object, pi,
					 VM_ALLOC_SYSTEM | VM_ALLOC_NULL_OK |
					 VM_ALLOC_ZERO);
			if (m == NULL)
				break;
			vm_pager_get_page(shm_handle->shm_object, pi, &m, 1);
			vm_page_activate(m);
			vm_page_wakeup(m);
			lwkt_yield();
		}
		vm_object_drop(shm_handle->shm_object);
	}
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	sysmsg->sysmsg_result = shmid;
	return 0;
}
int
sys_shmget(struct sysmsg *sysmsg, const struct shmget_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct prison *pr = p->p_ucred->cr_prison;
	int segnum, mode, error;

	if (pr && !PRISON_CAP_ISSET(pr->pr_caps, PRISON_CAP_SYS_SYSVIPC))
		return (ENOSYS);

	mode = uap->shmflg & ACCESSPERMS;

	lwkt_gettoken(&shm_token);
	if (uap->key != IPC_PRIVATE) {
again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, sysmsg, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done;
		}
	}
	error = shmget_allocate_segment(p, sysmsg, uap, mode);
done:
	lwkt_reltoken(&shm_token);

	return (error);
}
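
/*
 * Illustrative userland sketch of keyed creation via the path above
 * (standard SysV shm API, not code from this file).  EEXIST comes from
 * shmget_existing() when IPC_CREAT|IPC_EXCL is set and the key is
 * already in use:
 *
 *	key_t key = ftok("/some/path", 1);
 *	int id = shmget(key, len, IPC_CREAT | IPC_EXCL | 0600);
 *	if (id < 0 && errno == EEXIST)
 *		id = shmget(key, len, 0600);
 */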
void
shmfork(struct proc *p1, struct proc *p2)
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	lwkt_gettoken(&shm_token);
	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
	}
	lwkt_reltoken(&shm_token);
}
void
shmexit(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		lwkt_gettoken(&shm_token);
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		kfree(base, M_SHM);
		lwkt_reltoken(&shm_token);
	}
}
static void
shmrealloc(void)
{
	struct shmid_ds *newsegs;
	int i;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		/* mark the newly added entries in the new array free */
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	kfree(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}
static void
shminit(void *dummy)
{
	int i;

	/*
	 * If not overridden by a tunable, set the maximum shm to
	 * 2/3 of main memory.
	 */
	if (shminfo.shmall == 0)
		shminfo.shmall = (size_t)vmstats.v_page_count * 2 / 3;

	shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
	shmalloced = shminfo.shmmni;
	shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);