Fix "ls: not found" problem during buildworld. mdate.sh script
[dragonfly.git] / sys / kern / sysv_shm.c
blob5498ad0e46b8bc4b41c56148381c9aacd2c64c76
/* $FreeBSD: src/sys/kern/sysv_shm.c,v 1.45.2.6 2002/10/22 20:45:03 fjoe Exp $ */
/* $DragonFly: src/sys/kern/sysv_shm.c,v 1.15 2004/07/23 14:07:46 joerg Exp $ */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by Adam Glass and Charles
 *        Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
35 #include "opt_compat.h"
36 #include "opt_sysvipc.h"
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/sysproto.h>
41 #include <sys/kernel.h>
42 #include <sys/sysctl.h>
43 #include <sys/shm.h>
44 #include <sys/proc.h>
45 #include <sys/malloc.h>
46 #include <sys/mman.h>
47 #include <sys/stat.h>
48 #include <sys/sysent.h>
49 #include <sys/jail.h>
51 #include <vm/vm.h>
52 #include <vm/vm_param.h>
53 #include <sys/lock.h>
54 #include <vm/pmap.h>
55 #include <vm/vm_object.h>
56 #include <vm/vm_map.h>
57 #include <vm/vm_page.h>
58 #include <vm/vm_pager.h>
static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl (struct proc *p, struct oshmctl_args *uap);

static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap, int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap, int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
        (sy_call_t *)shmat, (sy_call_t *)oshmctl,
        (sy_call_t *)shmdt, (sy_call_t *)shmget,
        (sy_call_t *)shmctl
};

#define SHMSEG_FREE             0x0200
#define SHMSEG_REMOVED          0x0400
#define SHMSEG_ALLOCATED        0x0800
#define SHMSEG_WANTED           0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds *shmsegs;

struct shm_handle {
        /* vm_offset_t kva; */
        vm_object_t shm_object;
};

struct shmmap_state {
        vm_offset_t va;
        int shmid;
};

static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);
/*
 * Tuneable values
 */
#ifndef SHMMAXPGS
#define SHMMAXPGS       8192    /* note: sysv shared memory is swap backed */
#endif
#ifndef SHMMAX
#define SHMMAX  (SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define SHMMIN  1
#endif
#ifndef SHMMNI
#define SHMMNI  192
#endif
#ifndef SHMSEG
#define SHMSEG  128
#endif
#ifndef SHMALL
#define SHMALL  (SHMMAXPGS)
#endif
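
/*
 * The compile-time defaults above initialize the shminfo structure below.
 * The live values are exported as kern.ipc.* sysctls, and most of them can
 * also be preset as loader tunables; see the TUNABLE_INT()/SYSCTL_INT()
 * lines that follow.
 */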
struct shminfo shminfo = {
        SHMMAX,
        SHMMIN,
        SHMMNI,
        SHMSEG,
        SHMALL
};

static int shm_use_phys;

TUNABLE_INT("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_INT("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_INT("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_INT("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0, "");
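
/*
 * A user-visible shmid encodes both the slot index in shmsegs[]
 * (IPCID_TO_IX) and a 15-bit sequence number (IPCID_TO_SEQ) that guards
 * against reuse of a stale id after a slot is recycled.
 *
 * shm_find_segment_by_key() returns the index of the allocated segment
 * matching the given key, or -1 if there is none.
 */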
static int
shm_find_segment_by_key(key)
        key_t key;
{
        int i;

        for (i = 0; i < shmalloced; i++)
                if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[i].shm_perm.key == key)
                        return i;
        return -1;
}
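
/*
 * Translate a user-visible shmid back into its shmid_ds.  Returns NULL if
 * the index is out of range, the slot is not fully allocated (or is marked
 * removed), or the sequence number no longer matches.
 */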
static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
        int shmid;
{
        int segnum;
        struct shmid_ds *shmseg;

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shmalloced)
                return NULL;
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
            != SHMSEG_ALLOCATED ||
            shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
                return NULL;
        return shmseg;
}
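
/*
 * Release the backing VM object and the accounting for a segment whose
 * last attach is gone and which has been marked for removal.
 */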
static void
shm_deallocate_segment(shmseg)
        struct shmid_ds *shmseg;
{
        struct shm_handle *shm_handle;
        size_t size;

        shm_handle = shmseg->shm_internal;
        vm_object_deallocate(shm_handle->shm_object);
        free((caddr_t)shm_handle, M_SHM);
        shmseg->shm_internal = NULL;
        size = round_page(shmseg->shm_segsz);
        shm_committed -= btoc(size);
        shm_nused--;
        shmseg->shm_perm.mode = SHMSEG_FREE;
}
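
/*
 * Unmap one attach from the process vmspace and drop the segment's attach
 * count.  If this was the last attach and IPC_RMID has already been issued,
 * the segment itself is deallocated.
 */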
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
        struct shmid_ds *shmseg;
        int segnum, result;
        size_t size;

        segnum = IPCID_TO_IX(shmmap_s->shmid);
        shmseg = &shmsegs[segnum];
        size = round_page(shmseg->shm_segsz);
        result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
        if (result != KERN_SUCCESS)
                return EINVAL;
        shmmap_s->shmid = -1;
        shmseg->shm_dtime = time_second;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
        return 0;
}
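
/*
 * shmdt(2): detach the mapping that starts at uap->shmaddr from the
 * calling process.
 */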
int
shmdt(struct shmdt_args *uap)
{
        struct proc *p = curproc;
        struct shmmap_state *shmmap_s;
        int i;

        if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
                return (ENOSYS);

        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL)
                return EINVAL;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1 &&
                    shmmap_s->va == (vm_offset_t)uap->shmaddr)
                        break;
        if (i == shminfo.shmseg)
                return EINVAL;
        return shm_delete_mapping(p->p_vmspace, shmmap_s);
}
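
/*
 * shmat(2): map the segment identified by uap->shmid into the calling
 * process at uap->shmaddr (or at a kernel-chosen address when shmaddr is
 * NULL) and return the attach address in sysmsg_result.
 */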
int
shmat(struct shmat_args *uap)
{
        struct proc *p = curproc;
        int error, i, flags;
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s = NULL;
        struct shm_handle *shm_handle;
        vm_offset_t attach_va;
        vm_prot_t prot;
        vm_size_t size;
        int rv;

        if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
                return (ENOSYS);

        shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                size = shminfo.shmseg * sizeof(struct shmmap_state);
                shmmap_s = malloc(size, M_SHM, M_WAITOK);
                for (i = 0; i < shminfo.shmseg; i++)
                        shmmap_s[i].shmid = -1;
                p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        }
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL)
                return EINVAL;
        error = ipcperm(p, &shmseg->shm_perm,
            (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
        if (error)
                return error;
        for (i = 0; i < shminfo.shmseg; i++) {
                if (shmmap_s->shmid == -1)
                        break;
                shmmap_s++;
        }
        if (i >= shminfo.shmseg)
                return EMFILE;
        size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
        prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
        prot = VM_PROT_READ;
#endif
        if ((uap->shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON | MAP_SHARED;
        if (uap->shmaddr) {
                flags |= MAP_FIXED;
                if (uap->shmflg & SHM_RND)
                        attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
                else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
                        attach_va = (vm_offset_t)uap->shmaddr;
                else
                        return EINVAL;
        } else {
                /* This is just a hint to vm_map_find() about where to put it. */
                attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + maxtsiz + maxdsiz);
        }

        shm_handle = shmseg->shm_internal;
        vm_object_reference(shm_handle->shm_object);
        rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
                0, &attach_va, size, (flags & MAP_FIXED) ? 0 : 1, prot, prot, 0);
        if (rv != KERN_SUCCESS) {
                vm_object_deallocate(shm_handle->shm_object);
                return ENOMEM;
        }
        vm_map_inherit(&p->p_vmspace->vm_map,
                attach_va, attach_va + size, VM_INHERIT_SHARE);

        shmmap_s->va = attach_va;
        shmmap_s->shmid = uap->shmid;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_atime = time_second;
        shmseg->shm_nattch++;
        uap->sysmsg_result = attach_va;
        return 0;
}
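
/*
 * Legacy 4.3BSD-era shmctl interface (COMPAT_43): oshmid_ds is the old
 * structure layout copied out to old binaries, and oshmctl() services the
 * IPC_STAT case for it, falling through to the native shmctl() for
 * everything else.
 */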
struct oshmid_ds {
        struct ipc_perm shm_perm;       /* operation perms */
        int     shm_segsz;              /* size of segment (bytes) */
        ushort  shm_cpid;               /* pid, creator */
        ushort  shm_lpid;               /* pid, last operation */
        short   shm_nattch;             /* no. of current attaches */
        time_t  shm_atime;              /* last attach time */
        time_t  shm_dtime;              /* last detach time */
        time_t  shm_ctime;              /* last change time */
        void    *shm_handle;            /* internal handle for shm segment */
};

struct oshmctl_args {
        struct sysmsg sysmsg;
        int shmid;
        int cmd;
        struct oshmid_ds *ubuf;
};

static int
oshmctl(p, uap)
        struct proc *p;
        struct oshmctl_args *uap;
{
#ifdef COMPAT_43
        int error;
        struct shmid_ds *shmseg;
        struct oshmid_ds outbuf;

        if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
                return (ENOSYS);

        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL)
                return EINVAL;
        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(p, &shmseg->shm_perm, IPC_R);
                if (error)
                        return error;
                outbuf.shm_perm = shmseg->shm_perm;
                outbuf.shm_segsz = shmseg->shm_segsz;
                outbuf.shm_cpid = shmseg->shm_cpid;
                outbuf.shm_lpid = shmseg->shm_lpid;
                outbuf.shm_nattch = shmseg->shm_nattch;
                outbuf.shm_atime = shmseg->shm_atime;
                outbuf.shm_dtime = shmseg->shm_dtime;
                outbuf.shm_ctime = shmseg->shm_ctime;
                outbuf.shm_handle = shmseg->shm_internal;
                error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
                if (error)
                        return error;
                break;
        default:
                /* XXX casting to (sy_call_t *) is bogus, as usual. */
                return (shmctl((struct shmctl_args *)uap));
        }
        return 0;
#else
        return EINVAL;
#endif
}
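
/*
 * shmctl(2): IPC_STAT copies the shmid_ds out to the caller, IPC_SET lets
 * a sufficiently privileged caller change uid/gid/mode, and IPC_RMID marks
 * the segment removed so that it is deallocated once the last attach goes
 * away.
 */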
int
shmctl(struct shmctl_args *uap)
{
        struct proc *p = curproc;
        int error;
        struct shmid_ds inbuf;
        struct shmid_ds *shmseg;

        if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
                return (ENOSYS);

        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL)
                return EINVAL;
        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(p, &shmseg->shm_perm, IPC_R);
                if (error)
                        return error;
                error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
                if (error)
                        return error;
                break;
        case IPC_SET:
                error = ipcperm(p, &shmseg->shm_perm, IPC_M);
                if (error)
                        return error;
                error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
                if (error)
                        return error;
                shmseg->shm_perm.uid = inbuf.shm_perm.uid;
                shmseg->shm_perm.gid = inbuf.shm_perm.gid;
                shmseg->shm_perm.mode =
                    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                    (inbuf.shm_perm.mode & ACCESSPERMS);
                shmseg->shm_ctime = time_second;
                break;
        case IPC_RMID:
                error = ipcperm(p, &shmseg->shm_perm, IPC_M);
                if (error)
                        return error;
                shmseg->shm_perm.key = IPC_PRIVATE;
                shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                if (shmseg->shm_nattch <= 0) {
                        shm_deallocate_segment(shmseg);
                        shm_last_free = IPCID_TO_IX(uap->shmid);
                }
                break;
#if 0
        case SHM_LOCK:
        case SHM_UNLOCK:
#endif
        default:
                return EINVAL;
        }
        return 0;
}
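
/*
 * shmget(2) helper for a key that already maps to a segment: honor
 * IPC_CREAT|IPC_EXCL, check permissions and the requested size, and hand
 * back the existing id.  If the segment is still being set up, sleep until
 * its creator finishes and let the caller retry.
 */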
static int
shmget_existing(p, uap, mode, segnum)
        struct proc *p;
        struct shmget_args *uap;
        int mode;
        int segnum;
{
        struct shmid_ds *shmseg;
        int error;

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
                if (error)
                        return error;
                return EAGAIN;
        }
        if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
                return EEXIST;
        error = ipcperm(p, &shmseg->shm_perm, mode);
        if (error)
                return error;
        if (uap->size && uap->size > shmseg->shm_segsz)
                return EINVAL;
        uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        return 0;
}
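
/*
 * shmget(2) helper that creates a new segment: enforce the shmmin/shmmax
 * and shmall limits, pick a free slot in shmsegs[], allocate a swap- or
 * phys-backed VM object for the backing store, and fill in the shmid_ds
 * bookkeeping.
 */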
static int
shmget_allocate_segment(p, uap, mode)
        struct proc *p;
        struct shmget_args *uap;
        int mode;
{
        int i, segnum, shmid, size;
        struct ucred *cred = p->p_ucred;
        struct shmid_ds *shmseg;
        struct shm_handle *shm_handle;

        if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
                return EINVAL;
        if (shm_nused >= shminfo.shmmni) /* any shmids left? */
                return ENOSPC;
        size = round_page(uap->size);
        if (shm_committed + btoc(size) > shminfo.shmall)
                return ENOMEM;
        if (shm_last_free < 0) {
                shmrealloc();   /* maybe expand the shmsegs[] array */
                for (i = 0; i < shmalloced; i++)
                        if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                                break;
                if (i == shmalloced)
                        return ENOSPC;
                segnum = i;
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];
        /*
         * In case we sleep in malloc(), mark the segment present but deleted
         * so that no one else tries to create the same key.
         */
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->shm_perm.key = uap->key;
        shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
        shm_handle = (struct shm_handle *)
            malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
        shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

        /*
         * We make sure that we have allocated a pager before we need
         * to.
         */
        if (shm_use_phys) {
                shm_handle->shm_object =
                    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
        } else {
                shm_handle->shm_object =
                    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
        }
        vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
        vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

        shmseg->shm_internal = shm_handle;
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->shm_segsz = uap->size;
        shmseg->shm_cpid = p->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time_second;
        shm_committed += btoc(size);
        shm_nused++;
        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup((caddr_t)shmseg);
        }
        uap->sysmsg_result = shmid;
        return 0;
}
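
/*
 * shmget(2): for IPC_PRIVATE always create a new segment; otherwise look
 * the key up and either reuse the existing segment or, with IPC_CREAT,
 * allocate a new one.
 */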
int
shmget(struct shmget_args *uap)
{
        struct proc *p = curproc;
        int segnum, mode, error;

        if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
                return (ENOSYS);

        mode = uap->shmflg & ACCESSPERMS;
        if (uap->key != IPC_PRIVATE) {
        again:
                segnum = shm_find_segment_by_key(uap->key);
                if (segnum >= 0) {
                        error = shmget_existing(p, uap, mode, segnum);
                        if (error == EAGAIN)
                                goto again;
                        return error;
                }
                if ((uap->shmflg & IPC_CREAT) == 0)
                        return ENOENT;
        }
        return shmget_allocate_segment(p, uap, mode);
}
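
/*
 * Old-style shmsys(2) multiplexer: uap->which selects one of the handlers
 * in the shmcalls[] table above, and the remaining arguments are shifted
 * down over 'which' before dispatching.
 */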
/*
 * shmsys_args(int which, int a2, ...) (VARARGS)
 */
int
shmsys(struct shmsys_args *uap)
{
        struct proc *p = curproc;
        unsigned int which = (unsigned int)uap->which;
        int error;

        if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
                return (ENOSYS);

        if (which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
                return EINVAL;
        bcopy(&uap->a2, &uap->which,
                sizeof(struct shmsys_args) - offsetof(struct shmsys_args, a2));
        error = ((*shmcalls[which])(uap));
        return(error);
}
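
/*
 * Called at fork time: give the child its own copy of the parent's
 * shmmap_state array and bump the attach count of every segment that is
 * currently mapped.
 */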
void
shmfork(p1, p2)
        struct proc *p1, *p2;
{
        struct shmmap_state *shmmap_s;
        size_t size;
        int i;

        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = malloc(size, M_SHM, M_WAITOK);
        bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
        p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1)
                        shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}
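
/*
 * Called when a vmspace is torn down: detach every remaining segment and
 * free the per-process shmmap_state array.
 */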
void
shmexit(struct vmspace *vm)
{
        struct shmmap_state *base, *shm;
        int i;

        if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
                vm->vm_shm = NULL;
                for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
                        if (shm->shmid != -1)
                                shm_delete_mapping(vm, shm);
                }
                free(base, M_SHM);
        }
}
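
/*
 * Grow the shmsegs[] array up to shminfo.shmmni entries if it is currently
 * smaller, copying the existing slots and initializing the new ones as
 * free.
 */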
static void
shmrealloc(void)
{
        int i;
        struct shmid_ds *newsegs;

        if (shmalloced >= shminfo.shmmni)
                return;

        newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
        if (newsegs == NULL)
                return;
        for (i = 0; i < shmalloced; i++)
                bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
        for (; i < shminfo.shmmni; i++) {
                /* initialize the added slots in the new array, not the old one */
                newsegs[i].shm_perm.mode = SHMSEG_FREE;
                newsegs[i].shm_perm.seq = 0;
        }
        free(shmsegs, M_SHM);
        shmsegs = newsegs;
        shmalloced = shminfo.shmmni;
}
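
/*
 * Bootstrap the SysV shared memory subsystem at boot: size shmsegs[] from
 * the (possibly tuned) shminfo limits, derive shmmax from shmall, and mark
 * every slot free.
 */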
static void
shminit(dummy)
        void *dummy;
{
        int i;

        shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
        shmalloced = shminfo.shmmni;
        shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
        if (shmsegs == NULL)
                panic("cannot allocate initial memory for sysvshm");
        for (i = 0; i < shmalloced; i++) {
                shmsegs[i].shm_perm.mode = SHMSEG_FREE;
                shmsegs[i].shm_perm.seq = 0;
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);