sys/kern/sysv_shm.c

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap, int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap, int mode, int segnum);

#define SHMSEG_FREE		0x0200
#define SHMSEG_REMOVED		0x0400
#define SHMSEG_ALLOCATED	0x0800
#define SHMSEG_WANTED		0x1000

static int shm_last_free, shm_committed, shmalloced;
int shm_nused;
static struct shmid_ds *shmsegs;

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);

/*
 * Tuneable values
 */
#ifndef SHMMIN
#define SHMMIN	1
#endif
#ifndef SHMMNI
#define SHMMNI	512
#endif
#ifndef SHMSEG
#define SHMSEG	1024
#endif

struct shminfo shminfo = {
	0,		/* shmmax: filled in at boot by shminit() */
	SHMMIN,
	SHMMNI,
	SHMSEG,
	0		/* shmall: filled in at boot if not tuned */
};

static int shm_allow_removed;
static int shm_use_phys = 1;

TUNABLE_LONG("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_LONG("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_LONG("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_LONG("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);
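
/*
 * Illustrative only: these tunables are read from the boot-time
 * environment, so they can be set from /boot/loader.conf, e.g.
 *
 *	kern.ipc.shmseg=2048
 *	kern.ipc.shm_use_phys=2
 *
 * The CTLFLAG_RW sysctls declared below may also be changed at runtime
 * with sysctl(8).
 */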
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
    "Max shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
    "Min shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0,
    "Max number of shared memory identifiers");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0,
    "Max shared memory segments per process");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
    "Max pages of shared memory");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0,
    "Use phys pager allocation instead of swap pager allocation");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0,
    "Enable/Disable attachment to attached segments marked for removal");
static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shmalloced; i++) {
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	}
	return -1;
}
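
/*
 * Convert a shmid into a shmid_ds pointer.  Returns NULL if the index is
 * out of range, the sequence number is stale, or the segment is marked
 * removed while shm_allow_removed is disabled.
 */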
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	     (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid)) {
		return NULL;
	}
	return shmseg;
}
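
/*
 * Release the VM object backing a segment and credit its pages back to
 * the shm_committed accounting.  The segment slot is marked free.
 */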
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	kfree((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}
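
/*
 * Unmap one attachment from the given vmspace.  When the last attachment
 * of a segment marked SHMSEG_REMOVED goes away, the segment itself is
 * deallocated.
 */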
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}
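
/*
 * shmdt(shmaddr): detach the shared memory segment mapped at shmaddr
 * from the calling process.
 *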
 * MPALMOSTSAFE
 */
int
sys_shmdt(struct shmdt_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	long i;
	int error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	}
	if (i == shminfo.shmseg)
		error = EINVAL;
	else
		error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done:
	rel_mplock();
	return (error);
}
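
/*
 * shmat(shmid, shmaddr, shmflg): map an existing segment into the calling
 * process's address space, optionally at a caller-supplied address.
 *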
 * MPALMOSTSAFE
 */
int
sys_shmat(struct shmat_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error, flags;
	long i;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	vm_size_t align;
	int rv;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
again:
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		if (p->p_vmspace->vm_shm != NULL) {
			kfree(shmmap_s, M_SHM);
			goto again;
		}
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}
	error = ipcperm(p, &shmseg->shm_perm,
			(uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done;
	}
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND) {
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)uap->shmaddr;
		} else {
			error = EINVAL;
			goto done;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to put it.
		 */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr +
				       maxtsiz + maxdsiz);
	}

	/*
	 * Handle alignment.  For large memory maps it is possible
	 * that the MMU can optimize the page table so align anything
	 * that is a multiple of SEG_SIZE to SEG_SIZE.
	 */
	if ((flags & MAP_FIXED) == 0 && (size & SEG_MASK) == 0)
		align = SEG_SIZE;
	else
		align = PAGE_SIZE;

	shm_handle = shmseg->shm_internal;
	vm_object_hold(shm_handle->shm_object);
	vm_object_chain_wait(shm_handle->shm_object, 0);
	vm_object_reference_locked(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map,
			 shm_handle->shm_object, NULL,
			 0, &attach_va, size,
			 align,
			 ((flags & MAP_FIXED) ? 0 : 1),
			 VM_MAPTYPE_NORMAL,
			 prot, prot, 0);
	vm_object_drop(shm_handle->shm_object);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shm_handle->shm_object);
		error = ENOMEM;
		goto done;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		       attach_va, attach_va + size, VM_INHERIT_SHARE);

	KKASSERT(shmmap_s->shmid == -1);
	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	uap->sysmsg_resultp = (void *)attach_va;
	error = 0;
done:
	rel_mplock();
	return error;
}
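
/*
 * shmctl(shmid, cmd, buf): segment status and control operations
 * (IPC_STAT, IPC_SET, IPC_RMID).
 *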
 * MPALMOSTSAFE
 */
int
sys_shmctl(struct shmctl_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}

	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error == 0)
			error = copyout(shmseg, uap->buf, sizeof(inbuf));
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0)
			error = copyin(uap->buf, &inbuf, sizeof(inbuf));
		if (error == 0) {
			shmseg->shm_perm.uid = inbuf.shm_perm.uid;
			shmseg->shm_perm.gid = inbuf.shm_perm.gid;
			shmseg->shm_perm.mode =
			    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
			    (inbuf.shm_perm.mode & ACCESSPERMS);
			shmseg->shm_ctime = time_second;
		}
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0) {
			shmseg->shm_perm.key = IPC_PRIVATE;
			shmseg->shm_perm.mode |= SHMSEG_REMOVED;
			if (shmseg->shm_nattch <= 0) {
				shm_deallocate_segment(shmseg);
				shm_last_free = IPCID_TO_IX(uap->shmid);
			}
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done:
	rel_mplock();
	return error;
}
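
/*
 * Handle a shmget() request against a key that already exists.  May sleep
 * if the segment is still being allocated; returns EAGAIN so the caller
 * can retry the key lookup.
 */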
static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}
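
/*
 * Allocate a brand new segment for shmget(), including its backing pager
 * object and, optionally, pre-faulted physical pages.
 */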
static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
	int i, segnum, shmid;
	size_t size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		shmrealloc();	/* maybe expand the shmsegs[] array */
		for (i = 0; i < shmalloced; i++) {
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		}
		if (i == shmalloced)
			return ENOSPC;
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		    phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		    swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	}
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;

	/*
	 * If a physical mapping is desired and we have a ton of free pages
	 * we pre-allocate the pages here in order to avoid on-the-fly
	 * allocation later.  This has a big effect on database warm-up
	 * times since DFly supports concurrent page faults coming from the
	 * same VM object for pages which already exist.
	 *
	 * This can hang the kernel for a while so only do it if shm_use_phys
	 * is set to 2 or higher.
	 */
	if (shm_use_phys > 1) {
		vm_pindex_t pi, pmax;
		vm_page_t m;

		pmax = round_page(shmseg->shm_segsz) >> PAGE_SHIFT;
		vm_object_hold(shm_handle->shm_object);
		if (pmax > vmstats.v_free_count)
			pmax = vmstats.v_free_count;
		for (pi = 0; pi < pmax; ++pi) {
			m = vm_page_grab(shm_handle->shm_object, pi,
					 VM_ALLOC_SYSTEM | VM_ALLOC_NULL_OK |
					 VM_ALLOC_ZERO);
			if (m == NULL)
				break;
			vm_pager_get_page(shm_handle->shm_object, &m, 1);
			vm_page_activate(m);
			vm_page_wakeup(m);
			lwkt_yield();
		}
		vm_object_drop(shm_handle->shm_object);
	}

	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	uap->sysmsg_result = shmid;
	return 0;
}
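
/*
 * shmget(key, size, shmflg): look up or create a segment for the given
 * IPC key.
 *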
 * MPALMOSTSAFE
 */
int
sys_shmget(struct shmget_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int segnum, mode, error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	mode = uap->shmflg & ACCESSPERMS;
	get_mplock();

	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done;
		}
	}
	error = shmget_allocate_segment(p, uap, mode);
done:
	rel_mplock();
	return (error);
}
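
/*
 * Called from fork: the child inherits copies of the parent's shm
 * attachments, so bump the attach count on each mapped segment.
 */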
void
shmfork(struct proc *p1, struct proc *p2)
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	get_mplock();
	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
	}
	rel_mplock();
}
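
/*
 * Called when a vmspace is torn down: release all shm attachments owned
 * by the exiting vmspace.
 */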
void
shmexit(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		get_mplock();
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		kfree(base, M_SHM);
		rel_mplock();
	}
}
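
/*
 * Grow shmsegs[] up to the shmmni limit, preserving existing entries and
 * marking the new slots free.
 */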
static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		/* initialize new slots in the new array, not the old one */
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	kfree(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}
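
/*
 * Boot-time initialization: size the limits from main memory when no
 * tunable overrides them, and allocate the initial shmsegs[] array.
 */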
static void
shminit(void *dummy)
{
	int i;

	/*
	 * If not overridden by a tunable, set the maximum shm to
	 * 2/3 of main memory.
	 */
	if (shminfo.shmall == 0)
		shminfo.shmall = (size_t)vmstats.v_page_count * 2 / 3;

	shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
	shmalloced = shminfo.shmmni;
	shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);