/* $FreeBSD: src/sys/kern/sysv_sem.c,v 1.69 2004/03/17 09:37:13 cperciva Exp $ */
/* $DragonFly: src/sys/kern/sysv_sem.c,v 1.19 2008/01/06 16:55:51 swildner Exp $ */
/*
 * Implementation of SVID semaphores
 *
 * Author: Daniel Boulet
 *
 * This software is provided ``AS IS'' without any warranties of any kind.
 */
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sem.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/jail.h>
#include <sys/thread.h>
#include <sys/mplock2.h>
static MALLOC_DEFINE(M_SEM, "sem", "SVID compatible semaphores");

static void seminit (void *);
static struct sem_undo *semu_alloc (struct proc *p);
static int semundo_adjust (struct proc *p, struct sem_undo **supptr,
		int semid, int semnum, int adjval);
static void semundo_clear (int semid, int semnum);
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *semcalls[] = {
	(sy_call_t *)sys___semctl, (sy_call_t *)sys_semget,
	(sy_call_t *)sys_semop
};
static int	semtot = 0;
static struct semid_ds *sema;		/* semaphore id pool */
static struct sem *sem;			/* semaphore pool */
static struct sem_undo *semu_list;	/* list of active undo structures */
static int	*semu;			/* undo structure pool */
struct sem {
	u_short	semval;		/* semaphore value */
	pid_t	sempid;		/* pid of last operation */
	u_short	semncnt;	/* # awaiting semval > cval */
	u_short	semzcnt;	/* # awaiting semval = 0 */
};
/*
 * Undo structure (one per process)
 */
struct sem_undo {
	struct	sem_undo *un_next;	/* ptr to next active undo structure */
	struct	proc *un_proc;		/* owner of this structure */
	short	un_cnt;			/* # of active entries */
	struct undo {
		short	un_adjval;	/* adjust on exit values */
		short	un_num;		/* semaphore # */
		int	un_id;		/* semid */
	} un_ent[1];			/* undo entries */
};
/*
 * Configuration parameters
 */
#ifndef SEMMNI
#define SEMMNI	10		/* # of semaphore identifiers */
#endif
#ifndef SEMMNS
#define SEMMNS	60		/* # of semaphores in system */
#endif
#ifndef SEMUME
#define SEMUME	10		/* max # of undo entries per process */
#endif
#ifndef SEMMNU
#define SEMMNU	30		/* # of undo structures in system */
#endif

/* shouldn't need tuning */
#ifndef SEMMAP
#define SEMMAP	30		/* # of entries in semaphore map */
#endif
#ifndef SEMMSL
#define SEMMSL	SEMMNS		/* max # of semaphores per id */
#endif
#ifndef SEMOPM
#define SEMOPM	100		/* max # of operations per semop call */
#endif

#define SEMVMX	32767		/* semaphore maximum value */
#define SEMAEM	16384		/* adjust on exit max value */
/*
 * Due to the way semaphore memory is allocated, we have to ensure that
 * SEMUSZ is properly aligned.
 */
#define SEM_ALIGN(bytes) (((bytes) + (sizeof(long) - 1)) & ~(sizeof(long) - 1))

/* actual size of an undo structure */
#define SEMUSZ	SEM_ALIGN(offsetof(struct sem_undo, un_ent[SEMUME]))
/*
 * Macro to find a particular sem_undo vector
 */
#define SEMU(ix)	((struct sem_undo *)(((intptr_t)semu) + (ix) * seminfo.semusz))
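
/*
 * Illustration (not part of the original file): the undo pool is a single
 * kmalloc'd byte array of seminfo.semmnu * seminfo.semusz bytes, so SEMU(ix)
 * locates the ix'th per-process vector by byte offset rather than by array
 * indexing.  With the default SEMUME of 10 undo entries, each vector occupies
 * SEM_ALIGN(offsetof(struct sem_undo, un_ent[10])) bytes; the exact figure
 * depends on the platform's struct padding.
 */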
/*
 * semaphore info struct
 */
struct seminfo seminfo = {
	SEMMAP,		/* # of entries in semaphore map */
	SEMMNI,		/* # of semaphore identifiers */
	SEMMNS,		/* # of semaphores in system */
	SEMMNU,		/* # of undo structures in system */
	SEMMSL,		/* max # of semaphores per id */
	SEMOPM,		/* max # of operations per semop call */
	SEMUME,		/* max # of undo entries per process */
	SEMUSZ,		/* size in bytes of undo structure */
	SEMVMX,		/* semaphore maximum value */
	SEMAEM		/* adjust on exit max value */
};
TUNABLE_INT("kern.ipc.semmap", &seminfo.semmap);
TUNABLE_INT("kern.ipc.semmni", &seminfo.semmni);
TUNABLE_INT("kern.ipc.semmns", &seminfo.semmns);
TUNABLE_INT("kern.ipc.semmnu", &seminfo.semmnu);
TUNABLE_INT("kern.ipc.semmsl", &seminfo.semmsl);
TUNABLE_INT("kern.ipc.semopm", &seminfo.semopm);
TUNABLE_INT("kern.ipc.semume", &seminfo.semume);
TUNABLE_INT("kern.ipc.semusz", &seminfo.semusz);
TUNABLE_INT("kern.ipc.semvmx", &seminfo.semvmx);
TUNABLE_INT("kern.ipc.semaem", &seminfo.semaem);
SYSCTL_INT(_kern_ipc, OID_AUTO, semmap, CTLFLAG_RW, &seminfo.semmap, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semmni, CTLFLAG_RD, &seminfo.semmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semmns, CTLFLAG_RD, &seminfo.semmns, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semmnu, CTLFLAG_RD, &seminfo.semmnu, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semmsl, CTLFLAG_RW, &seminfo.semmsl, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semopm, CTLFLAG_RD, &seminfo.semopm, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semume, CTLFLAG_RD, &seminfo.semume, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semusz, CTLFLAG_RD, &seminfo.semusz, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semvmx, CTLFLAG_RW, &seminfo.semvmx, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semaem, CTLFLAG_RW, &seminfo.semaem, 0, "");
#if 0
RO seminfo.semmap	/* SEMMAP unused */
RO seminfo.semmnu	/* undo entries per system */
RO seminfo.semopm	/* SEMOPM unused */
RO seminfo.semusz	/* param - derived from SEMUME for per-proc sizeof */
RO seminfo.semvmx	/* SEMVMX unused - user param */
RO seminfo.semaem	/* SEMAEM unused - user param */
#endif
static void
seminit(void *dummy)
{
	int i;

	sem = kmalloc(sizeof(struct sem) * seminfo.semmns, M_SEM, M_WAITOK);
	sema = kmalloc(sizeof(struct semid_ds) * seminfo.semmni, M_SEM, M_WAITOK);
	semu = kmalloc(seminfo.semmnu * seminfo.semusz, M_SEM, M_WAITOK);

	for (i = 0; i < seminfo.semmni; i++) {
		sema[i].sem_base = 0;
		sema[i].sem_perm.mode = 0;
	}
	for (i = 0; i < seminfo.semmnu; i++) {
		struct sem_undo *suptr = SEMU(i);

		suptr->un_proc = NULL;
	}
	semu_list = NULL;
}
SYSINIT(sysv_sem, SI_SUB_SYSV_SEM, SI_ORDER_FIRST, seminit, NULL)
/*
 * Entry point for all SEM calls
 *
 * semsys_args(int which, a2, a3, ...) (VARARGS)
 */
int
sys_semsys(struct semsys_args *uap)
{
	struct thread *td = curthread;
	unsigned int which = (unsigned int)uap->which;
	int error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	if (which >= sizeof(semcalls)/sizeof(semcalls[0]))
		return (EINVAL);

	bcopy(&uap->a2, &uap->which,
	      sizeof(struct semsys_args) - offsetof(struct semsys_args, a2));
	error = (*semcalls[which])(uap);
	return (error);
}
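
/*
 * Usage note (illustrative, not from the original source): semsys() is the
 * single VARARGS system call behind the userland wrappers; "which" selects
 * the handler from semcalls[] above (0 = __semctl, 1 = semget, 2 = semop),
 * and the remaining arguments are shifted down so the selected handler sees
 * its own argument structure.
 */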
/*
 * Allocate a new sem_undo structure for a process
 * (returns ptr to structure or NULL if no more room)
 */
static struct sem_undo *
semu_alloc(struct proc *p)
{
	int i;
	struct sem_undo *suptr;
	struct sem_undo **supptr;
	int attempt;

	/*
	 * Try twice to allocate something.
	 * (we'll purge any empty structures after the first pass so
	 * two passes are always enough)
	 */
	for (attempt = 0; attempt < 2; attempt++) {
		/*
		 * Look for a free structure.
		 * Fill it in and return it if we find one.
		 */
		for (i = 0; i < seminfo.semmnu; i++) {
			suptr = SEMU(i);
			if (suptr->un_proc == NULL) {
				suptr->un_next = semu_list;
				semu_list = suptr;
				suptr->un_cnt = 0;
				suptr->un_proc = p;
				return(suptr);
			}
		}

		/*
		 * We didn't find a free one, if this is the first attempt
		 * then try to free some structures.
		 */
		if (attempt == 0) {
			/* All the structures are in use - try to free some */
			int did_something = 0;

			supptr = &semu_list;
			while ((suptr = *supptr) != NULL) {
				if (suptr->un_cnt == 0) {
					suptr->un_proc = NULL;
					*supptr = suptr->un_next;
					did_something = 1;
				} else {
					supptr = &(suptr->un_next);
				}
			}

			/* If we didn't free anything then just give-up */
			if (!did_something)
				return(NULL);
		} else {
			/*
			 * The second pass failed even though we freed
			 * something after the first pass!
			 * This is IMPOSSIBLE!
			 */
			panic("semu_alloc - second attempt failed");
		}
	}
	return (NULL);
}
/*
 * Adjust a particular entry for a particular proc
 */
static int
semundo_adjust(struct proc *p, struct sem_undo **supptr, int semid, int semnum,
	       int adjval)
{
	struct sem_undo *suptr;
	struct undo *sunptr;
	int i;

	/*
	 * Look for and remember the sem_undo if the caller doesn't
	 * provide it.
	 */
	suptr = *supptr;
	if (suptr == NULL) {
		for (suptr = semu_list; suptr != NULL;
		     suptr = suptr->un_next) {
			if (suptr->un_proc == p) {
				*supptr = suptr;
				break;
			}
		}
		if (suptr == NULL) {
			if (adjval == 0)
				return(0);
			suptr = semu_alloc(p);
			if (suptr == NULL)
				return(ENOSPC);
			*supptr = suptr;
		}
	}

	/*
	 * Look for the requested entry and adjust it (delete if adjval becomes
	 * 0).
	 */
	sunptr = &suptr->un_ent[0];
	for (i = 0; i < suptr->un_cnt; i++, sunptr++) {
		if (sunptr->un_id != semid || sunptr->un_num != semnum)
			continue;
		if (adjval == 0)
			sunptr->un_adjval = 0;
		else
			sunptr->un_adjval += adjval;
		if (sunptr->un_adjval == 0) {
			suptr->un_cnt--;
			if (i < suptr->un_cnt)
				suptr->un_ent[i] =
				    suptr->un_ent[suptr->un_cnt];
		}
		return(0);
	}

	/* Didn't find the right entry - create it */
	if (adjval == 0)
		return(0);
	if (suptr->un_cnt != seminfo.semume) {
		sunptr = &suptr->un_ent[suptr->un_cnt];
		suptr->un_cnt++;
		sunptr->un_adjval = adjval;
		sunptr->un_id = semid;
		sunptr->un_num = semnum;
		return(0);
	}
	return(EINVAL);
}
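
/*
 * Worked example (illustrative, not from the original source): if a process
 * performs semop() with sem_op = -2 and SEM_UNDO on semaphore 3 of a set,
 * sys_semop() calls semundo_adjust(..., semnum = 3, adjval = +2), so the
 * per-process undo entry accumulates the opposite of every undoable
 * operation.  A later +2 with SEM_UNDO adjusts by -2, the entry returns to
 * zero and is deleted; whatever remains is applied by semexit() when the
 * process exits.
 */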
static void
semundo_clear(int semid, int semnum)
{
	struct sem_undo *suptr;

	for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next) {
		struct undo *sunptr = &suptr->un_ent[0];
		int i = 0;

		while (i < suptr->un_cnt) {
			if (sunptr->un_id == semid) {
				if (semnum == -1 || sunptr->un_num == semnum) {
					suptr->un_cnt--;
					if (i < suptr->un_cnt) {
						suptr->un_ent[i] =
						    suptr->un_ent[suptr->un_cnt];
						continue;
					}
				}
				if (semnum != -1)
					break;
			}
			i++, sunptr++;
		}
	}
}
/*
 * Note that the user-mode half of this passes a union, not a pointer
 */
int
sys___semctl(struct __semctl_args *uap)
{
	struct thread *td = curthread;
	int semid = uap->semid;
	int semnum = uap->semnum;
	int cmd = uap->cmd;
	union semun *arg = uap->arg;
	union semun real_arg;
	struct ucred *cred = td->td_ucred;
	int i, rval, eval;
	struct semid_ds sbuf;
	struct semid_ds *semaptr;

#ifdef SEM_DEBUG
	kprintf("call to semctl(%d, %d, %d, 0x%x)\n", semid, semnum, cmd, arg);
#endif

	if (!jail_sysvipc_allowed && cred->cr_prison != NULL)
		return (ENOSYS);

	semid = IPCID_TO_IX(semid);
	if (semid < 0 || semid >= seminfo.semmni)
		return(EINVAL);

	semaptr = &sema[semid];
	if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
	    semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid)) {
		return(EINVAL);
	}

	eval = 0;
	rval = 0;

	switch (cmd) {
	case IPC_RMID:
		if ((eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_M)) != 0)
			break;
		semaptr->sem_perm.cuid = cred->cr_uid;
		semaptr->sem_perm.uid = cred->cr_uid;
		semtot -= semaptr->sem_nsems;
		for (i = semaptr->sem_base - sem; i < semtot; i++)
			sem[i] = sem[i + semaptr->sem_nsems];
		for (i = 0; i < seminfo.semmni; i++) {
			if ((sema[i].sem_perm.mode & SEM_ALLOC) &&
			    sema[i].sem_base > semaptr->sem_base)
				sema[i].sem_base -= semaptr->sem_nsems;
		}
		semaptr->sem_perm.mode = 0;
		semundo_clear(semid, -1);
		wakeup((caddr_t)semaptr);
		break;
	case IPC_SET:
		eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_M);
		if (eval)
			break;
		if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
			break;
		if ((eval = copyin(real_arg.buf, (caddr_t)&sbuf,
				   sizeof(sbuf))) != 0) {
			break;
		}
		semaptr->sem_perm.uid = sbuf.sem_perm.uid;
		semaptr->sem_perm.gid = sbuf.sem_perm.gid;
		semaptr->sem_perm.mode = (semaptr->sem_perm.mode & ~0777) |
		    (sbuf.sem_perm.mode & 0777);
		semaptr->sem_ctime = time_second;
		break;
	case IPC_STAT:
		if ((eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_R)))
			break;
		if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
			break;
		eval = copyout(semaptr, real_arg.buf, sizeof(struct semid_ds));
		break;
	case GETNCNT:
		eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_R);
		if (eval)
			break;
		if (semnum < 0 || semnum >= semaptr->sem_nsems) {
			eval = EINVAL;
			break;
		}
		rval = semaptr->sem_base[semnum].semncnt;
		break;
	case GETPID:
		eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_R);
		if (eval)
			break;
		if (semnum < 0 || semnum >= semaptr->sem_nsems) {
			eval = EINVAL;
			break;
		}
		rval = semaptr->sem_base[semnum].sempid;
		break;
	case GETVAL:
		eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_R);
		if (eval)
			break;
		if (semnum < 0 || semnum >= semaptr->sem_nsems) {
			eval = EINVAL;
			break;
		}
		rval = semaptr->sem_base[semnum].semval;
		break;
	case GETALL:
		eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_R);
		if (eval)
			break;
		if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
			break;
		for (i = 0; i < semaptr->sem_nsems; i++) {
			eval = copyout(&semaptr->sem_base[i].semval,
				       &real_arg.array[i],
				       sizeof(real_arg.array[0]));
			if (eval != 0)
				break;
		}
		break;
	case GETZCNT:
		eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_R);
		if (eval)
			break;
		if (semnum < 0 || semnum >= semaptr->sem_nsems) {
			eval = EINVAL;
			break;
		}
		rval = semaptr->sem_base[semnum].semzcnt;
		break;
	case SETVAL:
		eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_W);
		if (eval)
			break;
		if (semnum < 0 || semnum >= semaptr->sem_nsems) {
			eval = EINVAL;
			break;
		}
		if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
			break;
		semaptr->sem_base[semnum].semval = real_arg.val;
		semundo_clear(semid, semnum);
		wakeup((caddr_t)semaptr);
		break;
	case SETALL:
		eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_W);
		if (eval)
			break;
		if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
			break;
		for (i = 0; i < semaptr->sem_nsems; i++) {
			eval = copyin(&real_arg.array[i],
				      (caddr_t)&semaptr->sem_base[i].semval,
				      sizeof(real_arg.array[0]));
			if (eval != 0)
				break;
		}
		semundo_clear(semid, -1);
		wakeup((caddr_t)semaptr);
		break;
	default:
		eval = EINVAL;
		break;
	}

	if (eval == 0)
		uap->sysmsg_result = rval;
	return(eval);
}
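
/*
 * Userland sketch (illustrative, not part of this file): a typical caller
 * reads a single counter from a previously created set with something like
 *
 *	int val = semctl(id, 0, GETVAL);
 *
 * which reaches the switch above via __semctl/semsys; GETVAL returns the
 * value through sysmsg_result (the rval set in the GETVAL case).
 */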
int
sys_semget(struct semget_args *uap)
{
	struct thread *td = curthread;
	int semid, eval;
	int key = uap->key;
	int nsems = uap->nsems;
	int semflg = uap->semflg;
	struct ucred *cred = td->td_ucred;

#ifdef SEM_DEBUG
	kprintf("semget(0x%x, %d, 0%o)\n", key, nsems, semflg);
#endif

	if (!jail_sysvipc_allowed && cred->cr_prison != NULL)
		return (ENOSYS);

	if (key != IPC_PRIVATE) {
		for (semid = 0; semid < seminfo.semmni; semid++) {
			if ((sema[semid].sem_perm.mode & SEM_ALLOC) &&
			    sema[semid].sem_perm.key == key)
				break;
		}
		if (semid < seminfo.semmni) {
#ifdef SEM_DEBUG
			kprintf("found public key\n");
#endif
			if ((eval = ipcperm(td->td_proc,
					    &sema[semid].sem_perm,
					    semflg & 0700)))
				return(eval);
			if (nsems > 0 && sema[semid].sem_nsems < nsems) {
#ifdef SEM_DEBUG
				kprintf("too small\n");
#endif
				return(EINVAL);
			}
			if ((semflg & IPC_CREAT) && (semflg & IPC_EXCL)) {
#ifdef SEM_DEBUG
				kprintf("not exclusive\n");
#endif
				return(EEXIST);
			}
			goto found;
		}
	}

#ifdef SEM_DEBUG
	kprintf("need to allocate the semid_ds\n");
#endif
	if (key == IPC_PRIVATE || (semflg & IPC_CREAT)) {
		if (nsems <= 0 || nsems > seminfo.semmsl) {
#ifdef SEM_DEBUG
			kprintf("nsems out of range (0<%d<=%d)\n", nsems,
			    seminfo.semmsl);
#endif
			return(EINVAL);
		}
		if (nsems > seminfo.semmns - semtot) {
#ifdef SEM_DEBUG
			kprintf("not enough semaphores left (need %d, got %d)\n",
			    nsems, seminfo.semmns - semtot);
#endif
			return(ENOSPC);
		}
		for (semid = 0; semid < seminfo.semmni; semid++) {
			if ((sema[semid].sem_perm.mode & SEM_ALLOC) == 0)
				break;
		}
		if (semid == seminfo.semmni) {
#ifdef SEM_DEBUG
			kprintf("no more semid_ds's available\n");
#endif
			return(ENOSPC);
		}
#ifdef SEM_DEBUG
		kprintf("semid %d is available\n", semid);
#endif
		sema[semid].sem_perm.key = key;
		sema[semid].sem_perm.cuid = cred->cr_uid;
		sema[semid].sem_perm.uid = cred->cr_uid;
		sema[semid].sem_perm.cgid = cred->cr_gid;
		sema[semid].sem_perm.gid = cred->cr_gid;
		sema[semid].sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
		sema[semid].sem_perm.seq =
		    (sema[semid].sem_perm.seq + 1) & 0x7fff;
		sema[semid].sem_nsems = nsems;
		sema[semid].sem_otime = 0;
		sema[semid].sem_ctime = time_second;
		sema[semid].sem_base = &sem[semtot];
		semtot += nsems;
		bzero(sema[semid].sem_base,
		    sizeof(sema[semid].sem_base[0])*nsems);
#ifdef SEM_DEBUG
		kprintf("sembase = 0x%x, next = 0x%x\n", sema[semid].sem_base,
		    &sem[semtot]);
#endif
	} else {
#ifdef SEM_DEBUG
		kprintf("didn't find it and wasn't asked to create it\n");
#endif
		return(ENOENT);
	}

found:
	uap->sysmsg_result = IXSEQ_TO_IPCID(semid, sema[semid].sem_perm);
	return(0);
}
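
/*
 * Userland sketch (illustrative, not part of this file): creating a new
 * private set of 4 semaphores,
 *
 *	int id = semget(IPC_PRIVATE, 4, IPC_CREAT | 0600);
 *
 * runs through the allocation path above: a free semid_ds is claimed, nsems
 * slots are carved out of the sem[] pool at sem[semtot], and the returned
 * identifier is built by IXSEQ_TO_IPCID() from the slot index and its
 * sequence number, so stale identifiers can be detected later.
 */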
int
sys_semop(struct semop_args *uap)
{
	struct thread *td = curthread;
	int semid = uap->semid;
	u_int nsops = uap->nsops;
	struct sembuf sops[MAX_SOPS];
	struct semid_ds *semaptr;
	struct sembuf *sopptr;
	struct sem *semptr;
	struct sem_undo *suptr = NULL;
	int i, j, eval;
	int do_wakeup, do_undos;

#ifdef SEM_DEBUG
	kprintf("call to semop(%d, 0x%x, %u)\n", semid, sops, nsops);
#endif

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	semid = IPCID_TO_IX(semid);	/* Convert back to zero origin */

	if (semid < 0 || semid >= seminfo.semmni) {
		return(EINVAL);
	}

	semaptr = &sema[semid];
	if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0) {
		return(EINVAL);
	}
	if (semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid)) {
		return(EINVAL);
	}

	if ((eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_W))) {
#ifdef SEM_DEBUG
		kprintf("eval = %d from ipaccess\n", eval);
#endif
		return(eval);
	}

	if (nsops > MAX_SOPS) {
#ifdef SEM_DEBUG
		kprintf("too many sops (max=%d, nsops=%u)\n", MAX_SOPS, nsops);
#endif
		return(E2BIG);
	}

	if ((eval = copyin(uap->sops, &sops, nsops * sizeof(sops[0]))) != 0) {
#ifdef SEM_DEBUG
		kprintf("eval = %d from copyin(%08x, %08x, %u)\n", eval,
		    uap->sops, &sops, nsops * sizeof(sops[0]));
#endif
		return(eval);
	}

	/*
	 * Loop trying to satisfy the vector of requests.
	 * If we reach a point where we must wait, any requests already
	 * performed are rolled back and we go to sleep until some other
	 * process wakes us up. At this point, we start all over again.
	 *
	 * This ensures that from the perspective of other tasks, a set
	 * of requests is atomic (never partially satisfied).
	 */
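	/*
	 * Illustration (not part of the original file): a caller that submits
	 * two operations in one vector, for example
	 *
	 *	struct sembuf v[2];
	 *	v[0].sem_num = 0; v[0].sem_op = -1; v[0].sem_flg = 0;
	 *	v[1].sem_num = 1; v[1].sem_op =  1; v[1].sem_flg = SEM_UNDO;
	 *	semop(id, v, 2);
	 *
	 * either sees both operations applied or neither: if the -1 cannot be
	 * satisfied, the loop below rolls back anything already done and the
	 * process sleeps until another semop() or semctl() wakes it up.
	 */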
	do_undos = 0;

	for (;;) {
		do_wakeup = 0;

		for (i = 0; i < nsops; i++) {
			sopptr = &sops[i];

			if (sopptr->sem_num >= semaptr->sem_nsems)
				return(EFBIG);

			semptr = &semaptr->sem_base[sopptr->sem_num];

#ifdef SEM_DEBUG
			kprintf("semop: semaptr=%x, sem_base=%x, semptr=%x, sem[%d]=%d : op=%d, flag=%s\n",
			    semaptr, semaptr->sem_base, semptr,
			    sopptr->sem_num, semptr->semval, sopptr->sem_op,
			    (sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");
#endif

			if (sopptr->sem_op < 0) {
				if (semptr->semval + sopptr->sem_op < 0) {
#ifdef SEM_DEBUG
					kprintf("semop: can't do it now\n");
#endif
					break;
				}
				semptr->semval += sopptr->sem_op;
				if (semptr->semval == 0 &&
				    semptr->semzcnt > 0)
					do_wakeup = 1;
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
			} else if (sopptr->sem_op == 0) {
				if (semptr->semval > 0) {
#ifdef SEM_DEBUG
					kprintf("semop: not zero now\n");
#endif
					break;
				}
			} else {
				if (semptr->semncnt > 0)
					do_wakeup = 1;
				semptr->semval += sopptr->sem_op;
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
			}
		}

		/*
		 * Did we get through the entire vector?
		 */
		if (i >= nsops)
			goto done;

		/*
		 * No ... rollback anything that we've already done
		 */
#ifdef SEM_DEBUG
		kprintf("semop: rollback 0 through %d\n", i-1);
#endif
		for (j = 0; j < i; j++)
			semaptr->sem_base[sops[j].sem_num].semval -=
			    sops[j].sem_op;

		/*
		 * If the request that we couldn't satisfy has the
		 * NOWAIT flag set then return with EAGAIN.
		 */
		if (sopptr->sem_flg & IPC_NOWAIT) {
			return(EAGAIN);
		}

		if (sopptr->sem_op == 0)
			semptr->semzcnt++;
		else
			semptr->semncnt++;

#ifdef SEM_DEBUG
		kprintf("semop: good night!\n");
#endif
		eval = tsleep((caddr_t)semaptr, PCATCH, "semwait", 0);
#ifdef SEM_DEBUG
		kprintf("semop: good morning (eval=%d)!\n", eval);
#endif

		suptr = NULL;	/* sem_undo may have been reallocated */

		/* return code is checked below, after sem[nz]cnt-- */

		/*
		 * Make sure that the semaphore still exists
		 */
		if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
		    semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid)) {
			return(EIDRM);
		}

		/*
		 * The semaphore is still alive. Readjust the count of
		 * waiting processes.
		 */
		if (sopptr->sem_op == 0)
			semptr->semzcnt--;
		else
			semptr->semncnt--;

		/*
		 * Is it really morning, or was our sleep interrupted?
		 * (Delayed check of tsleep() return code because we
		 * need to decrement sem[nz]cnt either way.)
		 */
		if (eval != 0)
			return(EINTR);
#ifdef SEM_DEBUG
		kprintf("semop: good morning!\n");
#endif
	}

done:
	/*
	 * Process any SEM_UNDO requests.
	 */
	if (do_undos) {
		for (i = 0; i < nsops; i++) {
			/*
			 * We only need to deal with SEM_UNDO's for non-zero
			 * op's.
			 */
			int adjval;

			if ((sops[i].sem_flg & SEM_UNDO) == 0)
				continue;
			adjval = sops[i].sem_op;
			if (adjval == 0)
				continue;
			eval = semundo_adjust(td->td_proc, &suptr, semid,
					      sops[i].sem_num, -adjval);
			if (eval == 0)
				continue;

			/*
			 * Oh-Oh! We ran out of either sem_undo's or undo's.
			 * Rollback the adjustments to this point and then
			 * rollback the semaphore ups and down so we can return
			 * with an error with all structures restored. We
			 * rollback the undo's in the exact reverse order that
			 * we applied them. This guarantees that we won't run
			 * out of space as we roll things back out.
			 */
			for (j = i - 1; j >= 0; j--) {
				if ((sops[j].sem_flg & SEM_UNDO) == 0)
					continue;
				adjval = sops[j].sem_op;
				if (adjval == 0)
					continue;
				if (semundo_adjust(td->td_proc, &suptr, semid,
				    sops[j].sem_num, adjval) != 0)
					panic("semop - can't undo undos");
			}

			for (j = 0; j < nsops; j++)
				semaptr->sem_base[sops[j].sem_num].semval -=
				    sops[j].sem_op;

#ifdef SEM_DEBUG
			kprintf("eval = %d from semundo_adjust\n", eval);
#endif
			return(eval);
		} /* loop through the sops */
	} /* if (do_undos) */

	/* We're definitely done - set the sempid's */
	for (i = 0; i < nsops; i++) {
		sopptr = &sops[i];
		semptr = &semaptr->sem_base[sopptr->sem_num];
		semptr->sempid = td->td_proc->p_pid;
	}

	/* Do a wakeup if any semaphore was up'd. */
	if (do_wakeup) {
#ifdef SEM_DEBUG
		kprintf("semop: doing wakeup\n");
#endif
		wakeup((caddr_t)semaptr);
#ifdef SEM_DEBUG
		kprintf("semop: back from wakeup\n");
#endif
	}
#ifdef SEM_DEBUG
	kprintf("semop: done\n");
#endif
	uap->sysmsg_result = 0;
	return(0);
}
/*
 * Go through the undo structures for this process and apply the adjustments to
 * semaphores.
 */
void
semexit(struct proc *p)
{
	struct sem_undo *suptr;
	struct sem_undo **supptr;

	/*
	 * Go through the chain of undo vectors looking for one
	 * associated with this process.
	 */
	for (supptr = &semu_list; (suptr = *supptr) != NULL;
	     supptr = &suptr->un_next) {
		if (suptr->un_proc == p)
			break;
	}

	if (suptr == NULL)
		return;

#ifdef SEM_DEBUG
	kprintf("proc @%08x has undo structure with %d entries\n", p,
	    suptr->un_cnt);
#endif

	/*
	 * If there are any active undo elements then process them.
	 */
	if (suptr->un_cnt > 0) {
		int ix;

		for (ix = 0; ix < suptr->un_cnt; ix++) {
			int semid = suptr->un_ent[ix].un_id;
			int semnum = suptr->un_ent[ix].un_num;
			int adjval = suptr->un_ent[ix].un_adjval;
			struct semid_ds *semaptr;

			semaptr = &sema[semid];
			if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0)
				panic("semexit - semid not allocated");
			if (semnum >= semaptr->sem_nsems)
				panic("semexit - semnum out of range");

#ifdef SEM_DEBUG
			kprintf("semexit: %08x id=%d num=%d(adj=%d) ; sem=%d\n",
			    suptr->un_proc, suptr->un_ent[ix].un_id,
			    suptr->un_ent[ix].un_num,
			    suptr->un_ent[ix].un_adjval,
			    semaptr->sem_base[semnum].semval);
#endif

			if (adjval < 0) {
				if (semaptr->sem_base[semnum].semval < -adjval)
					semaptr->sem_base[semnum].semval = 0;
				else
					semaptr->sem_base[semnum].semval +=
					    adjval;
			} else {
				semaptr->sem_base[semnum].semval += adjval;
			}

			wakeup((caddr_t)semaptr);
#ifdef SEM_DEBUG
			kprintf("semexit: back from wakeup\n");
#endif
		}
	}

	/*
	 * Deallocate the undo vector.
	 */
#ifdef SEM_DEBUG
	kprintf("removing vector\n");
#endif
	suptr->un_proc = NULL;
	*supptr = suptr->un_next;
}