/* $FreeBSD: src/sys/kern/sysv_sem.c,v 1.69 2004/03/17 09:37:13 cperciva Exp $ */
/* $DragonFly: src/sys/kern/sysv_sem.c,v 1.19 2008/01/06 16:55:51 swildner Exp $ */

/*
 * Implementation of SVID semaphores
 *
 * Author: Daniel Boulet
 *
 * This software is provided ``AS IS'' without any warranties of any kind.
 */
12 #include "opt_sysvipc.h"
14 #include <sys/param.h>
15 #include <sys/systm.h>
16 #include <sys/sysproto.h>
17 #include <sys/kernel.h>
20 #include <sys/sysent.h>
21 #include <sys/sysctl.h>
22 #include <sys/malloc.h>
25 #include <sys/mplock2.h>
/* Allocation type used for every SysV-semaphore kernel allocation below. */
static MALLOC_DEFINE(M_SEM, "sem", "SVID compatible semaphores");

/* Boot-time pool initialization; hooked in via SYSINIT further down. */
static void seminit (void *);

/* Per-process exit-undo bookkeeping helpers (bodies later in this file). */
static struct sem_undo *semu_alloc (struct proc *p);
static int semundo_adjust (struct proc *p, struct sem_undo **supptr,
    int semid, int semnum, int adjval);
static void semundo_clear (int semid, int semnum);
36 /* XXX casting to (sy_call_t *) is bogus, as usual. */
37 static sy_call_t
*semcalls
[] = {
38 (sy_call_t
*)sys___semctl
, (sy_call_t
*)sys_semget
,
39 (sy_call_t
*)sys_semop
static int semtot = 0;			/* # of sem[] slots currently in use */
static struct semid_ds *sema;		/* semaphore id pool */
static struct sem *sema_base_pool_unused_comment; /* (see next line) */
static struct sem *sem;			/* semaphore pool */
static struct sem_undo *semu_list;	/* list of active undo structures */
static int *semu;			/* undo structure pool */
49 u_short semval
; /* semaphore value */
50 pid_t sempid
; /* pid of last operation */
51 u_short semncnt
; /* # awaiting semval > cval */
52 u_short semzcnt
; /* # awaiting semval = 0 */
/*
 * Undo structure (one per process)
 *
 * The "struct sem_undo {", inner "struct undo {" and both closing "};"
 * were lost in extraction and are restored here (the inner tag "undo"
 * is grounded by its use in semundo_clear()); members are as found.
 * un_ent[] is a variable-length trailing array sized via
 * SEMUSZ/offsetof below, hence the nominal [1] bound.
 */
struct sem_undo {
	struct sem_undo *un_next;	/* ptr to next active undo structure */
	struct proc *un_proc;		/* owner of this structure */
	short	un_cnt;			/* # of active entries */
	struct undo {
		short	un_adjval;	/* adjust on exit values */
		short	un_num;		/* semaphore # */
		int	un_id;		/* semid */
	} un_ent[1];			/* undo entries */
};
/*
 * Configuration parameters
 *
 * NOTE(review): in the pristine file each default is presumably wrapped in
 * an #ifndef guard so it can be overridden at build time; the guards were
 * lost in extraction -- confirm against a clean copy before relying on
 * compile-time override.
 */
#define SEMMNI	10		/* # of semaphore identifiers */
#define SEMMNS	60		/* # of semaphores in system */
#define SEMUME	10		/* max # of undo entries per process */
#define SEMMNU	30		/* # of undo structures in system */

/* shouldn't need tuning */
#define SEMMAP	30		/* # of entries in semaphore map */
#define SEMMSL	SEMMNS		/* max # of semaphores per id */
#define SEMOPM	100		/* max # of operations per semop call */

#define SEMVMX	32767		/* semaphore maximum value */
#define SEMAEM	16384		/* adjust on exit max value */

/*
 * Due to the way semaphore memory is allocated, we have to ensure that
 * SEMUSZ is properly aligned.
 */
#define SEM_ALIGN(bytes) (((bytes) + (sizeof(long) - 1)) & ~(sizeof(long) - 1))

/* actual size of an undo structure */
#define SEMUSZ	SEM_ALIGN(offsetof(struct sem_undo, un_ent[SEMUME]))

/*
 * Macro to find a particular sem_undo vector
 */
#define SEMU(ix)	((struct sem_undo *)(((intptr_t)semu)+ix * seminfo.semusz))
115 * semaphore info struct
117 struct seminfo seminfo
= {
118 SEMMAP
, /* # of entries in semaphore map */
119 SEMMNI
, /* # of semaphore identifiers */
120 SEMMNS
, /* # of semaphores in system */
121 SEMMNU
, /* # of undo structures in system */
122 SEMMSL
, /* max # of semaphores per id */
123 SEMOPM
, /* max # of operations per semop call */
124 SEMUME
, /* max # of undo entries per process */
125 SEMUSZ
, /* size in bytes of undo structure */
126 SEMVMX
, /* semaphore maximum value */
127 SEMAEM
/* adjust on exit max value */
/* Allow every seminfo limit to be overridden from the boot loader. */
TUNABLE_INT("kern.ipc.semmap", &seminfo.semmap);
TUNABLE_INT("kern.ipc.semmni", &seminfo.semmni);
TUNABLE_INT("kern.ipc.semmns", &seminfo.semmns);
TUNABLE_INT("kern.ipc.semmnu", &seminfo.semmnu);
TUNABLE_INT("kern.ipc.semmsl", &seminfo.semmsl);
TUNABLE_INT("kern.ipc.semopm", &seminfo.semopm);
TUNABLE_INT("kern.ipc.semume", &seminfo.semume);
TUNABLE_INT("kern.ipc.semusz", &seminfo.semusz);
TUNABLE_INT("kern.ipc.semvmx", &seminfo.semvmx);
TUNABLE_INT("kern.ipc.semaem", &seminfo.semaem);
/*
 * Export the limits under kern.ipc; RW entries may be changed at run-time,
 * RD entries only via loader tunables before the pools are sized.
 */
SYSCTL_INT(_kern_ipc, OID_AUTO, semmap, CTLFLAG_RW, &seminfo.semmap, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semmni, CTLFLAG_RD, &seminfo.semmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semmns, CTLFLAG_RD, &seminfo.semmns, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semmnu, CTLFLAG_RD, &seminfo.semmnu, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semmsl, CTLFLAG_RW, &seminfo.semmsl, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semopm, CTLFLAG_RD, &seminfo.semopm, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semume, CTLFLAG_RD, &seminfo.semume, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semusz, CTLFLAG_RD, &seminfo.semusz, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semvmx, CTLFLAG_RW, &seminfo.semvmx, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, semaem, CTLFLAG_RW, &seminfo.semaem, 0, "");
/*
 * Writability notes for the sysctls above (fragment of the original
 * comment block; comment delimiters were lost in extraction):
 *
 * RO seminfo.semmap	-- SEMMAP unused
 * RO seminfo.semmnu	-- undo entries per system
 * RO seminfo.semopm	-- SEMOPM unused
 * RO seminfo.semusz	-- param - derived from SEMUME for per-proc sizeof
 * RO seminfo.semvmx	-- SEMVMX unused - user param
 * RO seminfo.semaem	-- SEMAEM unused - user param
 */
/*
 * seminit: size the semaphore, semid_ds and undo pools from seminfo and
 * mark every slot free.
 *
 * NOTE(review): extraction fragment -- the enclosing "static void
 * seminit(...)" header, the declaration of `i`, and several closing
 * braces were lost; code tokens below are preserved exactly as found.
 */
	/* Pools are sized by the (tunable) seminfo limits. */
	sem = kmalloc(sizeof(struct sem) * seminfo.semmns, M_SEM, M_WAITOK);
	sema = kmalloc(sizeof(struct semid_ds) * seminfo.semmni, M_SEM, M_WAITOK);
	semu = kmalloc(seminfo.semmnu * seminfo.semusz, M_SEM, M_WAITOK);

	/* Mark every semaphore id free: no base, mode 0 (i.e. !SEM_ALLOC). */
	for (i = 0; i < seminfo.semmni; i++) {
		sema[i].sem_base = 0;
		sema[i].sem_perm.mode = 0;

	/* Mark every undo structure unowned. */
	for (i = 0; i < seminfo.semmnu; i++) {
		struct sem_undo *suptr = SEMU(i);
		suptr->un_proc = NULL;

/* Run seminit once at boot during the SysV-IPC init stage. */
SYSINIT(sysv_sem, SI_SUB_SYSV_SEM, SI_ORDER_FIRST, seminit, NULL)
/*
 * Entry point for all SEM calls
 *
 * semsys_args(int which, a2, a3, ...) (VARARGS)
 *
 * NOTE(review): extraction fragment -- the return type, braces, `error`
 * declaration and the returns guarded by the two checks were lost; code
 * tokens below are preserved exactly as found.
 */
sys_semsys(struct semsys_args *uap)
	struct thread *td = curthread;
	unsigned int which = (unsigned int)uap->which;

	/* SysV IPC is refused inside a jail unless explicitly allowed. */
	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)

	/* Bounds-check the sub-call index before dispatching. */
	if (which >= sizeof(semcalls)/sizeof(semcalls[0]))

	/* Shift a2.. down so the sub-syscall sees its own argument layout. */
	bcopy(&uap->a2, &uap->which,
	    sizeof(struct semsys_args) - offsetof(struct semsys_args, a2));
	error = (*semcalls[which])(uap);
/*
 * Allocate a new sem_undo structure for a process
 * (returns ptr to structure or NULL if no more room)
 *
 * NOTE(review): extraction fragment -- declarations of `i`/`attempt`, the
 * fill-in-and-return code inside the scan, the give-up path and most
 * braces were lost; code tokens below are preserved exactly as found.
 */
static struct sem_undo *
semu_alloc(struct proc *p)
	struct sem_undo *suptr;
	struct sem_undo **supptr;

	/*
	 * Try twice to allocate something.
	 * (we'll purge any empty structures after the first pass so
	 * two passes are always enough)
	 */
	for (attempt = 0; attempt < 2; attempt++) {
		/*
		 * Look for a free structure.
		 * Fill it in and return it if we find one.
		 */
		for (i = 0; i < seminfo.semmnu; i++) {
			if (suptr->un_proc == NULL) {
				/* Push onto the active-undo list head. */
				suptr->un_next = semu_list;

		/*
		 * We didn't find a free one, if this is the first attempt
		 * then try to free some structures.
		 */
			/* All the structures are in use - try to free some */
			int did_something = 0;

			/* Unlink any structure with no active entries. */
			while ((suptr = *supptr) != NULL) {
				if (suptr->un_cnt == 0) {
					suptr->un_proc = NULL;
					*supptr = suptr->un_next;
				/* (else arm lost: advance to next link) */
				supptr = &(suptr->un_next);

			/* If we didn't free anything then just give-up */

	/*
	 * The second pass failed even though we freed
	 * something after the first pass!
	 * This is IMPOSSIBLE!
	 */
	panic("semu_alloc - second attempt failed");
/*
 * Adjust a particular entry for a particular proc
 *
 * NOTE(review): extraction fragment -- the return-type line, the trailing
 * "int adjval)" parameter line, declarations of `i`/`sunptr`, return
 * statements and most braces were lost; code tokens below are preserved
 * exactly as found.
 */
semundo_adjust(struct proc *p, struct sem_undo **supptr, int semid, int semnum,
	struct sem_undo *suptr;

	/*
	 * Look for and remember the sem_undo if the caller doesn't provide
	 * one (rest of this comment lost in extraction).
	 */
	for (suptr = semu_list; suptr != NULL;
	    suptr = suptr->un_next) {
		if (suptr->un_proc == p) {

	/* No undo structure for this proc yet: allocate one. */
	suptr = semu_alloc(p);

	/*
	 * Look for the requested entry and adjust it (delete if adjval becomes
	 * zero -- rest of this comment lost in extraction).
	 */
	sunptr = &suptr->un_ent[0];
	for (i = 0; i < suptr->un_cnt; i++, sunptr++) {
		if (sunptr->un_id != semid || sunptr->un_num != semnum)
		sunptr->un_adjval = 0;
		sunptr->un_adjval += adjval;
		/* Entry cancels out: compact the array over it. */
		if (sunptr->un_adjval == 0) {
			if (i < suptr->un_cnt)
				/* (left side of compaction assignment lost) */
				suptr->un_ent[suptr->un_cnt];

	/* Didn't find the right entry - create it */
	if (suptr->un_cnt != seminfo.semume) {
		sunptr = &suptr->un_ent[suptr->un_cnt];
		sunptr->un_adjval = adjval;
		sunptr->un_id = semid; sunptr->un_num = semnum;
/*
 * Void undo entries recorded against a semaphore set; the visible
 * "semnum == -1" test shows -1 selects every semaphore in semid.
 *
 * NOTE(review): extraction fragment -- the return-type line, declaration
 * of `i`, the left side of the compaction assignment and most braces were
 * lost; code tokens below are preserved exactly as found.
 */
semundo_clear(int semid, int semnum)
	struct sem_undo *suptr;

	/* Walk every active undo structure in the system. */
	for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next) {
		struct undo *sunptr = &suptr->un_ent[0];
		while (i < suptr->un_cnt) {
			if (sunptr->un_id == semid) {
				if (semnum == -1 || sunptr->un_num == semnum) {
					/* compact over the cleared entry */
					if (i < suptr->un_cnt) {
						/* (assignment LHS lost) */
						suptr->un_ent[suptr->un_cnt];
/*
 * semctl system call.
 *
 * Note that the user-mode half of this passes a union, not a pointer.
 *
 * NOTE(review): extraction fragment -- the switch (cmd) skeleton, case
 * labels, declarations of cmd/i/rval/eval, error returns and most braces
 * were lost.  Statements are preserved token-for-token; the command each
 * run of statements appears to implement is annotated, to be confirmed
 * against a pristine copy.
 */
sys___semctl(struct __semctl_args *uap)
	struct thread *td = curthread;
	int semid = uap->semid;
	int semnum = uap->semnum;
	union semun *arg = uap->arg;
	union semun real_arg;
	struct ucred *cred = td->td_ucred;
	struct semid_ds sbuf;
	struct semid_ds *semaptr;
	struct semid_ds *semakptr;

	kprintf("call to semctl(%d, %d, %d, 0x%x)\n", semid, semnum, cmd, arg);

	/* SysV IPC is refused inside a jail unless explicitly allowed. */
	if (!jail_sysvipc_allowed && cred->cr_prison != NULL)

	/*
	 * (SEM_STAT-style path) For this command we assume semid is an
	 * array index rather than an IPC id.
	 */
	if (semid < 0 || semid >= seminfo.semmni) {
	semakptr = &sema[semid];
	if ((semakptr->sem_perm.mode & SEM_ALLOC) == 0) {
	if ((eval = ipcperm(td->td_proc, &semakptr->sem_perm, IPC_R)))
	/*
	 * NOTE(review): `&semakptr` copies the pointer variable itself,
	 * not the semid_ds it points at, and bcopy() to a user-supplied
	 * buffer (arg->buf) should normally be copyout().  Likely intended:
	 * copyout(semakptr, arg->buf, sizeof(struct semid_ds)) -- confirm.
	 */
	bcopy(&semakptr, arg->buf, sizeof(struct semid_ds));
	rval = IXSEQ_TO_IPCID(semid, semakptr->sem_perm);

	/* Normal commands: convert the IPC id to an index, validate seq. */
	semid = IPCID_TO_IX(semid);
	if (semid < 0 || semid >= seminfo.semmni) {
	semaptr = &sema[semid];
	if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
	    semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid)) {

	/* (appears to be IPC_RMID) requires IPC_M; compacts the sem[] pool. */
	if ((eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_M)) != 0)
	semaptr->sem_perm.cuid = cred->cr_uid;
	semaptr->sem_perm.uid = cred->cr_uid;
	semtot -= semaptr->sem_nsems;
	/* Slide later semaphores down over the freed range... */
	for (i = semaptr->sem_base - sem; i < semtot; i++)
		sem[i] = sem[i + semaptr->sem_nsems];
	/* ...and rebase every id that pointed past the freed range. */
	for (i = 0; i < seminfo.semmni; i++) {
		if ((sema[i].sem_perm.mode & SEM_ALLOC) &&
		    sema[i].sem_base > semaptr->sem_base)
			sema[i].sem_base -= semaptr->sem_nsems;
	semaptr->sem_perm.mode = 0;
	semundo_clear(semid, -1);
	wakeup((caddr_t)semaptr);

	/* (appears to be IPC_SET) copy in semid_ds, update owner/mode/ctime. */
	eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_M);
	if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
	if ((eval = copyin(real_arg.buf, (caddr_t)&sbuf,
	    sizeof(sbuf))) != 0) {
	semaptr->sem_perm.uid = sbuf.sem_perm.uid;
	semaptr->sem_perm.gid = sbuf.sem_perm.gid;
	semaptr->sem_perm.mode = (semaptr->sem_perm.mode & ~0777) |
	    (sbuf.sem_perm.mode & 0777);
	semaptr->sem_ctime = time_second;

	/* (appears to be IPC_STAT) copy the semid_ds out to the caller. */
	if ((eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_R)))
	if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
	eval = copyout(semaptr, real_arg.buf, sizeof(struct semid_ds));

	/* (appears to be GETNCNT) */
	eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_R);
	if (semnum < 0 || semnum >= semaptr->sem_nsems) {
	rval = semaptr->sem_base[semnum].semncnt;

	/* (appears to be GETPID) */
	eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_R);
	if (semnum < 0 || semnum >= semaptr->sem_nsems) {
	rval = semaptr->sem_base[semnum].sempid;

	/* (appears to be GETVAL) */
	eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_R);
	if (semnum < 0 || semnum >= semaptr->sem_nsems) {
	rval = semaptr->sem_base[semnum].semval;

	/* (appears to be GETALL; the copyout destination argument was lost) */
	eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_R);
	if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
	for (i = 0; i < semaptr->sem_nsems; i++) {
		eval = copyout(&semaptr->sem_base[i].semval,
		    sizeof(real_arg.array[0]));

	/* (appears to be GETZCNT) */
	eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_R);
	if (semnum < 0 || semnum >= semaptr->sem_nsems) {
	rval = semaptr->sem_base[semnum].semzcnt;

	/* (appears to be SETVAL) write one value, void undos, wake sleepers. */
	eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_W);
	if (semnum < 0 || semnum >= semaptr->sem_nsems) {
	if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
	semaptr->sem_base[semnum].semval = real_arg.val;
	semundo_clear(semid, semnum);
	wakeup((caddr_t)semaptr);

	/* (appears to be SETALL) copy all values in, void undos, wake. */
	eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_W);
	if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
	for (i = 0; i < semaptr->sem_nsems; i++) {
		eval = copyin(&real_arg.array[i],
		    (caddr_t)&semaptr->sem_base[i].semval,
		    sizeof(real_arg.array[0]));
	semundo_clear(semid, -1);
	wakeup((caddr_t)semaptr);

	/* Hand the accumulated result back to userland. */
	uap->sysmsg_result = rval;
/*
 * semget system call: find an existing semaphore set by key, or create a
 * new one under IPC_PRIVATE / IPC_CREAT.
 *
 * NOTE(review): extraction fragment -- the return type, declarations of
 * semid/eval/key, the error/return statements and many braces were lost;
 * code tokens below are preserved exactly as found.
 */
sys_semget(struct semget_args *uap)
	struct thread *td = curthread;
	int nsems = uap->nsems;
	int semflg = uap->semflg;
	struct ucred *cred = td->td_ucred;

	kprintf("semget(0x%x, %d, 0%o)\n", key, nsems, semflg);

	/* SysV IPC is refused inside a jail unless explicitly allowed. */
	if (!jail_sysvipc_allowed && cred->cr_prison != NULL)

	/* Named (non-private) keys: look for an existing set first. */
	if (key != IPC_PRIVATE) {
		for (semid = 0; semid < seminfo.semmni; semid++) {
			if ((sema[semid].sem_perm.mode & SEM_ALLOC) &&
			    sema[semid].sem_perm.key == key)
		if (semid < seminfo.semmni) {
			kprintf("found public key\n");
			if ((eval = ipcperm(td->td_proc,
			    &sema[semid].sem_perm,
			/* An existing set must be large enough for caller. */
			if (nsems > 0 && sema[semid].sem_nsems < nsems) {
				kprintf("too small\n");
			/* IPC_CREAT|IPC_EXCL on an existing key fails. */
			if ((semflg & IPC_CREAT) && (semflg & IPC_EXCL)) {
				kprintf("not exclusive\n");

	kprintf("need to allocate the semid_ds\n");
	if (key == IPC_PRIVATE || (semflg & IPC_CREAT)) {
		if (nsems <= 0 || nsems > seminfo.semmsl) {
			kprintf("nsems out of range (0<%d<=%d)\n", nsems,
		/* Enough free slots in the global sem[] pool? */
		if (nsems > seminfo.semmns - semtot) {
			kprintf("not enough semaphores left (need %d, got %d)\n",
			    nsems, seminfo.semmns - semtot);
		/* Find a free semaphore-id slot. */
		for (semid = 0; semid < seminfo.semmni; semid++) {
			if ((sema[semid].sem_perm.mode & SEM_ALLOC) == 0)
		if (semid == seminfo.semmni) {
			kprintf("no more semid_ds's available\n");
		kprintf("semid %d is available\n", semid);
		/* Initialize the new set; seq is bumped so stale ids fail. */
		sema[semid].sem_perm.key = key;
		sema[semid].sem_perm.cuid = cred->cr_uid;
		sema[semid].sem_perm.uid = cred->cr_uid;
		sema[semid].sem_perm.cgid = cred->cr_gid;
		sema[semid].sem_perm.gid = cred->cr_gid;
		sema[semid].sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
		sema[semid].sem_perm.seq =
		    (sema[semid].sem_perm.seq + 1) & 0x7fff;
		sema[semid].sem_nsems = nsems;
		sema[semid].sem_otime = 0;
		sema[semid].sem_ctime = time_second;
		sema[semid].sem_base = &sem[semtot];
		bzero(sema[semid].sem_base,
		    sizeof(sema[semid].sem_base[0])*nsems);
		kprintf("sembase = 0x%x, next = 0x%x\n", sema[semid].sem_base,
	kprintf("didn't find it and wasn't asked to create it\n");

	/* Success: return the IPC id built from index and sequence. */
	uap->sysmsg_result = IXSEQ_TO_IPCID(semid,
	    sema[semid].sem_perm);
/*
 * semop system call: apply a vector of semaphore operations atomically.
 *
 * NOTE(review): extraction fragment -- declarations of i/j/eval/semptr/
 * adjval, the return skeleton, several condition/statement tails and many
 * braces were lost; code tokens below are preserved exactly as found.
 */
sys_semop(struct semop_args *uap)
	struct thread *td = curthread;
	int semid = uap->semid;
	u_int nsops = uap->nsops;
	struct sembuf sops[MAX_SOPS];	/* on-stack copy of the user vector */
	struct semid_ds *semaptr;
	struct sembuf *sopptr;
	struct sem_undo *suptr = NULL;
	int do_wakeup, do_undos;

	kprintf("call to semop(%d, 0x%x, %u)\n", semid, sops, nsops);

	/* SysV IPC is refused inside a jail unless explicitly allowed. */
	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)

	semid = IPCID_TO_IX(semid);	/* Convert back to zero origin */

	if (semid < 0 || semid >= seminfo.semmni) {
	semaptr = &sema[semid];
	if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0) {
	if (semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid)) {
	if ((eval = ipcperm(td->td_proc, &semaptr->sem_perm, IPC_W))) {
		kprintf("eval = %d from ipaccess\n", eval);
	if (nsops > MAX_SOPS) {
		kprintf("too many sops (max=%d, nsops=%u)\n", MAX_SOPS, nsops);
	if ((eval = copyin(uap->sops, &sops, nsops * sizeof(sops[0]))) != 0) {
		kprintf("eval = %d from copyin(%08x, %08x, %u)\n", eval,
		    uap->sops, &sops, nsops * sizeof(sops[0]));

	/*
	 * Loop trying to satisfy the vector of requests.
	 * If we reach a point where we must wait, any requests already
	 * performed are rolled back and we go to sleep until some other
	 * process wakes us up. At this point, we start all over again.
	 *
	 * This ensures that from the perspective of other tasks, a set
	 * of requests is atomic (never partially satisfied).
	 */
	for (i = 0; i < nsops; i++) {
		if (sopptr->sem_num >= semaptr->sem_nsems) {
		semptr = &semaptr->sem_base[sopptr->sem_num];

		kprintf("semop: semaptr=%x, sem_base=%x, semptr=%x, sem[%d]=%d : op=%d, flag=%s\n",
		    semaptr, semaptr->sem_base, semptr,
		    sopptr->sem_num, semptr->semval, sopptr->sem_op,
		    (sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");

		if (sopptr->sem_op < 0) {
			/* Down: must not drive semval negative. */
			if (semptr->semval + sopptr->sem_op < 0) {
				kprintf("semop: can't do it now\n");
			semptr->semval += sopptr->sem_op;
			if (semptr->semval == 0 &&
			if (sopptr->sem_flg & SEM_UNDO)
		} else if (sopptr->sem_op == 0) {
			/* Wait-for-zero: cannot proceed while semval > 0. */
			if (semptr->semval > 0) {
				kprintf("semop: not zero now\n");
			/* (up branch; enclosing else lost in extraction) */
			if (semptr->semncnt > 0)
			semptr->semval += sopptr->sem_op;
			if (sopptr->sem_flg & SEM_UNDO)

	/*
	 * Did we get through the entire vector?
	 * No ... rollback anything that we've already done
	 */
	kprintf("semop: rollback 0 through %d\n", i-1);
	for (j = 0; j < i; j++)
		/* (right-hand side of the rollback assignment lost) */
		semaptr->sem_base[sops[j].sem_num].semval -=

	/*
	 * If the request that we couldn't satisfy has the
	 * NOWAIT flag set then return with EAGAIN.
	 */
	if (sopptr->sem_flg & IPC_NOWAIT) {
	/* Bump the right waiter count for the op we block on. */
	if (sopptr->sem_op == 0)

	kprintf("semop: good night!\n");
	eval = tsleep((caddr_t)semaptr, PCATCH, "semwait", 0);
	kprintf("semop: good morning (eval=%d)!\n", eval);
	suptr = NULL;	/* sem_undo may have been reallocated */
	/* return code is checked below, after sem[nz]cnt-- */

	/*
	 * Make sure that the semaphore still exists
	 */
	if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
	    semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid)) {

	/*
	 * The semaphore is still alive. Readjust the count of
	 * waiters (tail of comment lost in extraction).
	 */
	if (sopptr->sem_op == 0)

	/*
	 * Is it really morning, or was our sleep interrupted?
	 * (Delayed check of tsleep() return code because we
	 * need to decrement sem[nz]cnt either way.)
	 */
	kprintf("semop: good morning!\n");

	/*
	 * Process any SEM_UNDO requests.
	 */
	for (i = 0; i < nsops; i++) {
		/*
		 * We only need to deal with SEM_UNDO's for non-zero
		 * op's (tail of comment lost in extraction).
		 */
		if ((sops[i].sem_flg & SEM_UNDO) == 0)
		adjval = sops[i].sem_op;
		/* Record the inverse adjustment, applied at process exit. */
		eval = semundo_adjust(td->td_proc, &suptr, semid,
		    sops[i].sem_num, -adjval);
		/*
		 * Oh-Oh! We ran out of either sem_undo's or undo's.
		 * Rollback the adjustments to this point and then
		 * rollback the semaphore ups and down so we can return
		 * with an error with all structures restored. We
		 * rollback the undo's in the exact reverse order that
		 * we applied them. This guarantees that we won't run
		 * out of space as we roll things back out.
		 */
		for (j = i - 1; j >= 0; j--) {
			if ((sops[j].sem_flg & SEM_UNDO) == 0)
			adjval = sops[j].sem_op;
			if (semundo_adjust(td->td_proc, &suptr, semid,
			    sops[j].sem_num, adjval) != 0)
				panic("semop - can't undo undos");
		for (j = 0; j < nsops; j++)
			/* (right-hand side of the rollback assignment lost) */
			semaptr->sem_base[sops[j].sem_num].semval -=
		kprintf("eval = %d from semundo_adjust\n", eval);
	} /* loop through the sops */
	} /* if (do_undos) */

	/* We're definitely done - set the sempid's */
	for (i = 0; i < nsops; i++) {
		semptr = &semaptr->sem_base[sopptr->sem_num];
		semptr->sempid = td->td_proc->p_pid;

	/* Do a wakeup if any semaphore was up'd. */
	kprintf("semop: doing wakeup\n");
	wakeup((caddr_t)semaptr);
	kprintf("semop: back from wakeup\n");
	kprintf("semop: done\n");
	uap->sysmsg_result = 0;
/*
 * Go through the undo structures for this process and apply the adjustments to
 * semaphores (tail of comment lost in extraction).
 *
 * NOTE(review): extraction fragment -- the return-type line, the early
 * return when no undo structure exists for p, the branch structure around
 * the two adjustment statements and many braces were lost; code tokens
 * below are preserved exactly as found.
 */
semexit(struct proc *p)
	struct sem_undo *suptr;
	struct sem_undo **supptr;

	/*
	 * Go through the chain of undo vectors looking for one
	 * associated with this process.
	 */
	for (supptr = &semu_list; (suptr = *supptr) != NULL;
	    supptr = &suptr->un_next) {
		if (suptr->un_proc == p)

	kprintf("proc @%08x has undo structure with %d entries\n", p,

	/*
	 * If there are any active undo elements then process them.
	 */
	if (suptr->un_cnt > 0) {
		for (ix = 0; ix < suptr->un_cnt; ix++) {
			int semid = suptr->un_ent[ix].un_id;
			int semnum = suptr->un_ent[ix].un_num;
			int adjval = suptr->un_ent[ix].un_adjval;
			struct semid_ds *semaptr;

			/* Sanity: the recorded id/index must still be live. */
			semaptr = &sema[semid];
			if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0)
				panic("semexit - semid not allocated");
			if (semnum >= semaptr->sem_nsems)
				panic("semexit - semnum out of range");

			kprintf("semexit: %08x id=%d num=%d(adj=%d) ; sem=%d\n",
			    suptr->un_proc, suptr->un_ent[ix].un_id,
			    suptr->un_ent[ix].un_num,
			    suptr->un_ent[ix].un_adjval,
			    semaptr->sem_base[semnum].semval);

			/* Clamp a negative adjust at zero, never underflow. */
			if (semaptr->sem_base[semnum].semval < -adjval)
				semaptr->sem_base[semnum].semval = 0;
			/* (RHS of first adjust and branch structure lost) */
			semaptr->sem_base[semnum].semval +=
			semaptr->sem_base[semnum].semval += adjval;

			wakeup((caddr_t)semaptr);
			kprintf("semexit: back from wakeup\n");

	/*
	 * Deallocate the undo vector.
	 */
	kprintf("removing vector\n");
	suptr->un_proc = NULL;
	*supptr = suptr->un_next;