rtld - Add fork hooks for libthread_xu to install
[dragonfly.git] / lib / libc / sysvipc / sem.c
blob65f6e658ae43cfef029a1b8c74415199cbf3c6a1
1 /* $FreeBSD: src/sys/kern/sysv_sem.c,v 1.69 2004/03/17 09:37:13 cperciva Exp $ */
3 /*
4 * Implementation of SVID semaphores
6 * Author: Daniel Boulet
7 * Copyright (c) 2013 Larisa Grigore <larisagrigore@gmail.com>
9 * This software is provided ``AS IS'' without any warranties of any kind.
12 #include "namespace.h"
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <errno.h>
16 #include <err.h>
17 #include <pthread.h>
18 #include <string.h>
19 #include <stdarg.h>
20 #include <sys/param.h>
21 #include <sys/queue.h>
22 #include <sys/mman.h>
23 #include <sys/sem.h>
24 #include "un-namespace.h"
26 #include "sysvipc_lock.h"
27 #include "sysvipc_ipc.h"
28 #include "sysvipc_shm.h"
29 #include "sysvipc_sem.h"
30 #include "sysvipc_hash.h"
/*
 * Mutex wrappers: locking is skipped entirely in single-threaded
 * processes (__isthreaded == 0), matching libc convention.
 *
 * Wrapped in do { } while (0) so each macro behaves as a single
 * statement; the previous bare "if (...) call" form mis-binds in an
 * unbraced if/else (dangling-else hazard).
 */
#define SYSV_MUTEX_LOCK(x)	do { if (__isthreaded) _pthread_mutex_lock(x); } while (0)
#define SYSV_MUTEX_UNLOCK(x)	do { if (__isthreaded) _pthread_mutex_unlock(x); } while (0)
#define SYSV_MUTEX_DESTROY(x)	do { if (__isthreaded) _pthread_mutex_destroy(x); } while (0)
37 extern struct hashtable *shmaddrs;
38 extern struct hashtable *shmres;
39 extern pthread_mutex_t lock_resources;
41 struct sem_undo *undos = NULL;
42 pthread_mutex_t lock_undo = PTHREAD_MUTEX_INITIALIZER;
44 static int semundo_clear(int, int);
/*
 * Drop one local reference on the shm segment backing semaphore 'id'.
 *
 * Returns 0 on a plain put, -1 if the id is unknown (should not
 * happen), 1 if the segment was already removed by another process,
 * or the result of sysvipc_shmctl(IPC_RMID) when this process is the
 * one that must tell the daemon to destroy it.
 */
static int
put_shmdata(int id) {
	struct shm_data *data;
	int ret = -1;

	SYSV_MUTEX_LOCK(&lock_resources);
	data = _hash_lookup(shmres, id);
	if (!data) {
		sysv_print_err("something wrong put_shmdata\n");
		goto done; /* It should not reach here. */
	}

	data->used--;
	/* Last local user of a segment flagged as removed: tear it down. */
	if (data->used == 0 && data->removed) {
		sysv_print("really remove the sem\n");
		SYSV_MUTEX_UNLOCK(&lock_resources);
		/* OBS: Even if the shmctl fails (the thread doesn't
		 * have IPC_M permissions), all structures associated
		 * with it will be removed in the current process.*/
		sysvipc_shmdt(data->internal);
		semundo_clear(id, -1);
		/* NOTE(review): data->removed is read after the mapping is
		 * detached; 'data' is the hash entry, not the mapping, so
		 * this is presumably still valid — confirm _hash_lookup
		 * entries outlive sysvipc_shmdt(). */
		if (data->removed == SEG_ALREADY_REMOVED)
			return 1; /* The semaphore was removed
			by another process so there is nothing else
			we must do. */
		/* Else inform the daemon that the segment is removed. */
		return (sysvipc_shmctl(id, IPC_RMID, NULL));
	}
	ret = 0;
done:
	SYSV_MUTEX_UNLOCK(&lock_resources);
	return (ret);
}
/*
 * Attach (or find attached) the shm segment backing 'semid' and return
 * the semid_pool mapped inside it.  Takes one reference via
 * get_shmdata(); the caller must release it with put_shmdata().
 * Returns NULL with errno set on failure.
 */
static struct semid_pool*
get_semaptr(int semid, int to_remove, int shm_access) {
	struct semid_pool *semaptr;

	struct shm_data *shmdata = get_shmdata(semid, to_remove, shm_access);
	if (!shmdata) {
		/* Error is set in get_shmdata. */
		return (NULL);
	}

	semaptr = (struct semid_pool *)shmdata->internal;
	if (!semaptr) {
		/* Segment known but not mapped; give back the reference. */
		put_shmdata(semid);
		errno = EINVAL;
		return (NULL);
	}

	return (semaptr);
}
101 static int
102 sema_exist(int semid, struct semid_pool *semaptr) {
103 /* Was it removed? */
104 if (semaptr->gen == -1 ||
105 semaptr->ds.sem_perm.seq != IPCID_TO_SEQ(semid))
106 return (0);
108 return (1);
/*
 * Called when the semaphore is discovered to have been removed by
 * another process.  Marks the process-internal shm_data and lets
 * put_shmdata() unmap the segment once the last local reference is
 * gone.
 */
static void
mark_for_removal(int shmid) {
	sysv_print("Mark that the segment was removed\n");
	/* With SEG_ALREADY_REMOVED set, the internal resources are
	 * freed when put_shmdata() drops the last reference. */
	get_shmdata(shmid, SEG_ALREADY_REMOVED, 0);
	/* Decrement the "usage" field. */
	put_shmdata(shmid);
}
/*
 * Take the pool lock for reading (shared when SYSV_RWLOCK, otherwise
 * the single mutex) and verify the set still exists.  If it was
 * removed in the meantime, release the lock, mark the local mapping
 * for teardown, and fail with EINVAL.
 * Returns 0 with the lock held, or -1 with errno set.
 */
static int
try_rwlock_rdlock(int semid, struct semid_pool *semaptr) {
	sysv_print(" before rd lock id = %d %x\n", semid, semaptr);
#ifdef SYSV_RWLOCK
	sysv_rwlock_rdlock(&semaptr->rwlock);
	sysv_print("rd lock id = %d\n", semid);
#else
	sysv_mutex_lock(&semaptr->mutex);
	sysv_print("lock id = %d\n", semid);
#endif
	/* Existence must be re-checked under the lock: the set can be
	 * destroyed between attach and lock acquisition. */
	if (!sema_exist(semid, semaptr)) {
		errno = EINVAL;
		sysv_print("error sema %d doesn't exist\n", semid);
#ifdef SYSV_RWLOCK
		sysv_rwlock_unlock(&semaptr->rwlock);
#else
		sysv_mutex_unlock(&semaptr->mutex);
#endif
		/* Internal resources must be freed. */
		mark_for_removal(semid);
		return (-1);
	}
	return (0);
}
/*
 * Take the pool lock for writing (exclusive when SYSV_RWLOCK,
 * otherwise the single mutex) and verify the set still exists.
 * On a vanished set: release the lock, mark the local mapping for
 * teardown, fail with EINVAL.
 * Returns 0 with the lock held, or -1 with errno set.
 */
static int
try_rwlock_wrlock(int semid, struct semid_pool *semaptr) {
#ifdef SYSV_RWLOCK
	sysv_print("before wrlock id = %d %x\n", semid, semaptr);
	sysv_rwlock_wrlock(&semaptr->rwlock);
#else
	sysv_print("before lock id = %d %x\n", semid, semaptr);
	sysv_mutex_lock(&semaptr->mutex);
#endif
	sysv_print("lock id = %d\n", semid);
	/* Re-check existence under the lock (see try_rwlock_rdlock). */
	if (!sema_exist(semid, semaptr)) {
		errno = EINVAL;
		sysv_print("error sema %d doesn't exist\n", semid);
#ifdef SYSV_RWLOCK
		sysv_rwlock_unlock(&semaptr->rwlock);
#else
		sysv_mutex_unlock(&semaptr->mutex);
#endif
		/* Internal resources must be freed. */
		mark_for_removal(semid);
		return (-1);
	}
	return (0);
}
/*
 * Release the pool lock taken by try_rwlock_rdlock()/try_rwlock_wrlock().
 * If the set was destroyed while we held the lock, mark the local
 * mapping for teardown and fail with EINVAL.
 * Returns 0 on success, -1 with errno set.
 */
static int
rwlock_unlock(int semid, struct semid_pool *semaptr) {
	sysv_print("unlock id = %d %x\n", semid, semaptr);
	if (!sema_exist(semid, semaptr)) {
		/* Internal resources must be freed. */
		/* NOTE(review): this path returns without releasing the
		 * rwlock/mutex — presumably moot because the backing
		 * segment is being unmapped, but confirm no other thread
		 * in this process can still block on it. */
		mark_for_removal(semid);
		errno = EINVAL;
		return (-1);
	}
#ifdef SYSV_RWLOCK
	sysv_rwlock_unlock(&semaptr->rwlock);
#else
	sysv_mutex_unlock(&semaptr->mutex);
#endif
	return (0);
}
193 sysvipc_semget(key_t key, int nsems, int semflg) {
194 int semid;
195 void *shmaddr;
196 //int shm_access;
197 int size = sizeof(struct semid_pool) + nsems * sizeof(struct sem);
199 //TODO resources limits
200 sysv_print("handle semget\n");
202 semid = _shmget(key, size, semflg, SEMGET);
203 if (semid == -1) {
204 /* errno already set. */
205 goto done;
208 /* If the semaphore is in process of being removed there are two cases:
209 * - the daemon knows that and it will handle this situation.
210 * - one of the threads from this address space remove it and the daemon
211 * wasn't announced yet; in this scenario, the semaphore is marked
212 * using "removed" field of shm_data and future calls will return
213 * EIDRM error.
216 #if 0
217 /* Set access type. */
218 shm_access = semflg & (IPC_W | IPC_R);
219 if(set_shmdata_access(semid, shm_access) != 0) {
220 /* errno already set. */
221 goto done;
223 #endif
224 shmaddr = sysvipc_shmat(semid, NULL, 0);
225 if (!shmaddr) {
226 semid = -1;
227 sysvipc_shmctl(semid, IPC_RMID, NULL);
228 goto done;
231 //TODO more semaphores in a single file
233 done:
234 sysv_print("end handle semget %d\n", semid);
235 return (semid);
/*
 * Delete undo records for semaphore 'semid': all of them when
 * semnum == -1, or only the record for that one semaphore number.
 * Deletion is swap-with-last: the last entry is copied into the hole
 * and the loop re-examines the same index ('continue' without ++i).
 * Always returns 0.
 */
static int
semundo_clear(int semid, int semnum)
{
	struct undo *sunptr;
	int i;

	sysv_print("semundo clear\n");

	SYSV_MUTEX_LOCK(&lock_undo);
	if (!undos)
		goto done;

	sunptr = &undos->un_ent[0];
	i = 0;

	while (i < undos->un_cnt) {
		if (sunptr->un_id == semid) {
			if (semnum == -1 || sunptr->un_num == semnum) {
				undos->un_cnt--;
				if (i < undos->un_cnt) {
					/* Fill the hole with the last entry
					 * and re-check index i. */
					undos->un_ent[i] =
					    undos->un_ent[undos->un_cnt];
					continue;
				}
			}
			/* A specific semnum has at most one record. */
			if (semnum != -1)
				break;
		}
		++i;
		++sunptr;
	}
	//TODO Shrink memory if case; not sure if necessary
done:
	SYSV_MUTEX_UNLOCK(&lock_undo);
	sysv_print("end semundo clear\n");
	return (0);
}
/*
 * Emulated semctl(2).
 *
 * Permission/metadata commands (IPC_SET/IPC_STAT) are forwarded to the
 * backing shm segment via sysvipc_shmctl(); value commands operate on
 * the mapped semid_pool under the pool rwlock.
 *
 * Return convention is mixed (kept as-is): -1 with errno on attach
 * failure; otherwise the 'error' accumulator, which is 0, an errno
 * value (e.g. EFAULT), a sysvipc_shmctl() result, or — for the GET*
 * commands — the queried value itself.
 */
int
sysvipc___semctl(int semid, int semnum , int cmd, union semun *arg)
{
	int i, error;
	struct semid_pool *semaptr = NULL;
	struct sem *semptr = NULL;
	struct shmid_ds shmds;
	int shm_access = 0;

	/*if (!jail_sysvipc_allowed && cred->cr_prison != NULL)
		return (ENOSYS);
	*/

	sysv_print("semctl cmd = %d\n", cmd);

	error = 0;

	/* Map the command onto the shm permission it needs. */
	switch (cmd) {
	case IPC_SET:	/* Originally was IPC_M but this is checked
			 * by daemon. */
	case SETVAL:
	case SETALL:
		shm_access = IPC_W;
		break;
	case IPC_STAT:
	case GETNCNT:
	case GETPID:
	case GETVAL:
	case GETALL:
	case GETZCNT:
		shm_access = IPC_R;
		break;
	default:
		break;
	}

	semaptr = get_semaptr(semid, cmd==IPC_RMID, shm_access);
	if (!semaptr) {
		/* errno already set. */
		return (-1);
	}

	switch (cmd) {
	case IPC_RMID:
		/* Mark that the segment is removed. This is done in
		 * get_semaptr call in order to announce other processes.
		 * It will be actually removed after put_shmdata call and
		 * not other thread from this address space use shm_data
		 * structure.
		 */
		break;

	case IPC_SET:
		if (!arg->buf) {
			error = EFAULT;
			break;
		}
		/* NOTE(review): sizeof(shmds)/sizeof(unsigned char) equals
		 * sizeof(shmds) (sizeof(unsigned char) == 1); the division
		 * looks like a leftover — harmless but confirm intent. */
		memset(&shmds, 0, sizeof(shmds)/sizeof(unsigned char));
		memcpy(&shmds.shm_perm, &arg->buf->sem_perm,
		    sizeof(struct ipc_perm));
		error = sysvipc_shmctl(semid, cmd, &shmds);
		/* OBS: didn't update ctime and mode as in kernel implementation
		 * it is done. Those fields are already updated for shmid_ds
		 * struct when calling shmctl
		 */
		break;

	case IPC_STAT:
		if (!arg->buf) {
			error = EFAULT;
			break;
		}
		error = sysvipc_shmctl(semid, cmd, &shmds);
		if (error)
			break;

		memcpy(&arg->buf->sem_perm, &shmds.shm_perm,
		    sizeof(struct ipc_perm));
		/* nsems is recovered from the segment size. */
		arg->buf->sem_nsems = (shmds.shm_segsz - sizeof(struct semid_pool)) /
		    sizeof(struct sem);
		arg->buf->sem_ctime = shmds.shm_ctime;

		/* otime is semaphore specific so read it from
		 * semaptr
		 */
		error = try_rwlock_rdlock(semid, semaptr);
		if (error)
			break;
		arg->buf->sem_otime = semaptr->ds.sem_otime;
		rwlock_unlock(semid, semaptr);
		break;

	case GETNCNT:
		if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
			/* NOTE(review): errno is set but 'error' stays 0 in
			 * these range-check branches, so the caller sees a
			 * success return — verify this is intended. */
			errno = EINVAL;
			break;
		}
		error = try_rwlock_rdlock(semid, semaptr);
		if (error)
			break;
		error = semaptr->ds.sem_base[semnum].semncnt;
		rwlock_unlock(semid, semaptr);
		break;

	case GETPID:
		if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
			errno = EINVAL;
			break;
		}
		error = try_rwlock_rdlock(semid, semaptr);
		if (error)
			break;
		error = semaptr->ds.sem_base[semnum].sempid;
		rwlock_unlock(semid, semaptr);
		break;

	case GETVAL:
		if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
			errno = EINVAL;
			break;
		}
		error = try_rwlock_rdlock(semid, semaptr);
		if (error)
			break;
		error = semaptr->ds.sem_base[semnum].semval;
		rwlock_unlock(semid, semaptr);
		break;

	case GETALL:
		if (!arg->array) {
			error = EFAULT;
			break;
		}
		error = try_rwlock_rdlock(semid, semaptr);
		if (error)
			break;
		for (i = 0; i < semaptr->ds.sem_nsems; i++) {
			arg->array[i] = semaptr->ds.sem_base[i].semval;
		}
		rwlock_unlock(semid, semaptr);
		break;

	case GETZCNT:
		if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
			errno = EINVAL;
			break;
		}
		error = try_rwlock_rdlock(semid, semaptr);
		if (error)
			break;
		error = semaptr->ds.sem_base[semnum].semzcnt;
		rwlock_unlock(semid, semaptr);
		break;

	case SETVAL:
		if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
			errno = EINVAL;
			break;
		}
		error = try_rwlock_wrlock(semid, semaptr);
		if (error)
			break;
		semptr = &semaptr->ds.sem_base[semnum];
		semptr->semval = arg->val;
		/* Undo records are invalidated by an explicit set. */
		semundo_clear(semid, semnum);
		/* Wake anyone sleeping on this semaphore's value. */
		if (semptr->semzcnt || semptr->semncnt)
			umtx_wakeup((int *)&semptr->semval, 0);
		rwlock_unlock(semid, semaptr);
		break;

	case SETALL:
		if (!arg->array) {
			error = EFAULT;
			break;
		}
		error = try_rwlock_wrlock(semid, semaptr);
		if (error)
			break;
		for (i = 0; i < semaptr->ds.sem_nsems; i++) {
			semptr = &semaptr->ds.sem_base[i];
			semptr->semval = arg->array[i];
			if (semptr->semzcnt || semptr->semncnt)
				umtx_wakeup((int *)&semptr->semval, 0);
		}
		semundo_clear(semid, -1);
		rwlock_unlock(semid, semaptr);
		break;

	default:
		errno = EINVAL;
		break;
	}

	/* Drop the reference taken by get_semaptr(); for IPC_RMID this is
	 * also what actually tears the segment down. */
	put_shmdata(semid);

	sysv_print("end semctl\n");
	return (error);
}
486 * Adjust a particular entry for a particular proc
488 static int
489 semundo_adjust(int semid, int semnum, int adjval)
491 struct undo *sunptr;
492 int i;
493 int error = 0;
494 size_t size;
495 int undoid;
496 void *addr;
497 struct shm_data *data;
499 sysv_print("semundo adjust\n");
500 if (!adjval)
501 goto done;
503 SYSV_MUTEX_LOCK(&lock_undo);
504 if (!undos) {
505 sysv_print("get undo segment\n");
506 undoid = _shmget(IPC_PRIVATE, PAGE_SIZE, IPC_CREAT | IPC_EXCL | 0600,
507 UNDOGET);
508 if (undoid == -1) {
509 sysv_print_err("no undo segment\n");
510 return (-1);
513 addr = sysvipc_shmat(undoid, NULL, 0);
514 if (!addr) {
515 sysv_print_err("can not map undo segment\n");
516 sysvipc_shmctl(undoid, IPC_RMID, NULL);
517 return (-1);
520 undos = (struct sem_undo *)addr;
521 undos->un_pages = 1;
522 undos->un_cnt = 0;
526 * Look for the requested entry and adjust it (delete if adjval becomes
527 * 0).
529 sunptr = &undos->un_ent[0];
530 for (i = 0; i < undos->un_cnt; i++, sunptr++) {
531 if (sunptr->un_id != semid && sunptr->un_num != semnum)
532 continue;
533 sunptr->un_adjval += adjval;
534 if (sunptr->un_adjval == 0) {
535 undos->un_cnt--;
536 if (i < undos->un_cnt)
537 undos->un_ent[i] = undos->un_ent[undos->un_cnt];
539 goto done;
542 /* Didn't find the right entry - create it */
543 size = sizeof(struct sem_undo) + (undos->un_cnt + 1) *
544 sizeof(struct sem_undo);
545 if (size > (unsigned int)(undos->un_pages * PAGE_SIZE)) {
546 sysv_print("need more undo space\n");
547 sysvipc_shmdt(undos);
548 undos->un_pages++;
550 SYSV_MUTEX_LOCK(&lock_resources);
551 data = _hash_lookup(shmaddrs, (u_long)undos);
552 SYSV_MUTEX_UNLOCK(&lock_resources);
554 /* It is not necessary any lock on "size" because it is used
555 * only by shmat and shmdt.
556 * shmat for undoid is called only from this function and it
557 * is protected by undo_lock.
558 * shmdt for undoid is not called anywhere because the segment
559 * is destroyed by the daemon when the client dies.
561 data->size = undos->un_pages * PAGE_SIZE;
562 undos = sysvipc_shmat(data->shmid, NULL, 0);
565 sunptr = &undos->un_ent[undos->un_cnt];
566 undos->un_cnt++;
567 sunptr->un_adjval = adjval;
568 sunptr->un_id = semid;
569 sunptr->un_num = semnum;
570 //if (suptr->un_cnt == seminfo.semume) TODO move it in daemon
571 /*} else {
572 error = EINVAL; //se face prin notificare
574 done:
575 SYSV_MUTEX_UNLOCK(&lock_undo);
577 sysv_print("semundo adjust end\n");
578 return (error);
581 int sysvipc_semop (int semid, struct sembuf *sops, unsigned nsops) {
582 struct semid_pool *semaptr = NULL, *auxsemaptr = NULL;
583 struct sembuf *sopptr;
584 struct sem *semptr = NULL;
585 struct sem *xsemptr = NULL;
586 int eval = 0;
587 int i, j;
588 int do_undos;
589 int val_to_sleep;
591 sysv_print("[client %d] call to semop(%d, %u)\n",
592 getpid(), semid, nsops);
593 //TODO
594 /*if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
595 return (ENOSYS);
598 semaptr = get_semaptr(semid, 0, IPC_W);
599 if (!semaptr) {
600 errno = EINVAL;
601 return (-1);
604 #ifdef SYSV_SEMS
605 if (try_rwlock_rdlock(semid, semaptr) == -1) {
606 #else
607 if (try_rwlock_wrlock(semid, semaptr) == -1) {
608 #endif
609 sysv_print("sema removed\n");
610 errno = EIDRM;
611 goto done2;
614 if (nsops > MAX_SOPS) {
615 sysv_print("too many sops (max=%d, nsops=%u)\n",
616 getpid(), MAX_SOPS, nsops);
617 eval = E2BIG;
618 goto done;
622 * Loop trying to satisfy the vector of requests.
623 * If we reach a point where we must wait, any requests already
624 * performed are rolled back and we go to sleep until some other
625 * process wakes us up. At this point, we start all over again.
627 * This ensures that from the perspective of other tasks, a set
628 * of requests is atomic (never partially satisfied).
630 do_undos = 0;
632 for (;;) {
634 semptr = NULL;
636 for (i = 0; i < (int)nsops; i++) {
637 sopptr = &sops[i];
639 if (sopptr->sem_num >= semaptr->ds.sem_nsems) {
640 eval = EFBIG;
641 goto done;
644 semptr = &semaptr->ds.sem_base[sopptr->sem_num];
645 #ifdef SYSV_SEMS
646 sysv_mutex_lock(&semptr->sem_mutex);
647 #endif
648 sysv_print("semop: sem[%d]=%d : op=%d, flag=%s\n",
649 sopptr->sem_num, semptr->semval, sopptr->sem_op,
650 (sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");
652 if (sopptr->sem_op < 0) {
653 if (semptr->semval + sopptr->sem_op < 0) {
654 sysv_print("semop: can't do it now\n");
655 break;
656 } else {
657 semptr->semval += sopptr->sem_op;
658 if (semptr->semval == 0 &&
659 semptr->semzcnt > 0)
660 umtx_wakeup((int *)&semptr->semval, 0);
662 if (sopptr->sem_flg & SEM_UNDO)
663 do_undos = 1;
664 } else if (sopptr->sem_op == 0) {
665 if (semptr->semval > 0) {
666 sysv_print("semop: not zero now\n");
667 break;
669 } else {
670 semptr->semval += sopptr->sem_op;
671 if (sopptr->sem_flg & SEM_UNDO)
672 do_undos = 1;
673 if (semptr->semncnt > 0)
674 umtx_wakeup((int *)&semptr->semval, 0);
676 #ifdef SYSV_SEMS
677 sysv_mutex_unlock(&semptr->sem_mutex);
678 #endif
682 * Did we get through the entire vector?
684 if (i >= (int)nsops)
685 goto donex;
687 if (sopptr->sem_op == 0)
688 semptr->semzcnt++;
689 else
690 semptr->semncnt++;
693 * Get interlock value before rleeasing sem_mutex.
695 * XXX horrible hack until we get a umtx_sleep16() (and a umtx_sleep64())
696 * system call.
698 val_to_sleep = *(int *)&semptr->semval;
699 #ifdef SYSV_SEMS
700 sysv_mutex_unlock(&semptr->sem_mutex);
701 #endif
703 * Rollback the semaphores we had acquired.
705 sysv_print("semop: rollback 0 through %d\n", i-1);
706 for (j = 0; j < i; j++) {
707 xsemptr = &semaptr->ds.sem_base[sops[j].sem_num];
708 #ifdef SYSV_SEMS
709 sysv_mutex_lock(&xsemptr->sem_mutex);
710 #endif
711 xsemptr->semval -= sops[j].sem_op;
712 if (xsemptr->semval == 0 && xsemptr->semzcnt > 0)
713 umtx_wakeup((int *)&xsemptr->semval, 0);
714 if (xsemptr->semval <= 0 && xsemptr->semncnt > 0)
715 umtx_wakeup((int *)&xsemptr->semval, 0); //?!
716 #ifdef SYSV_SEMS
717 sysv_mutex_unlock(&xsemptr->sem_mutex);
718 #endif
722 * If the request that we couldn't satisfy has the
723 * NOWAIT flag set then return with EAGAIN.
725 if (sopptr->sem_flg & IPC_NOWAIT) {
726 eval = EAGAIN;
727 goto done;
731 * Release semaptr->lock while sleeping, allowing other
732 * semops (like SETVAL, SETALL, etc), which require an
733 * exclusive lock and might wake us up.
735 * Reload and recheck the validity of semaptr on return.
736 * Note that semptr itself might have changed too, but
737 * we've already interlocked for semptr and that is what
738 * will be woken up if it wakes up the tsleep on a MP
739 * race.
742 sysv_print("semop: good night!\n");
743 rwlock_unlock(semid, semaptr);
744 put_shmdata(semid);
746 /* We don't sleep more than SYSV_TIMEOUT because we could
747 * go to sleep after another process calls wakeup and remain
748 * blocked.
750 eval = umtx_sleep((int *)&semptr->semval, val_to_sleep, SYSV_TIMEOUT);
751 /* return code is checked below, after sem[nz]cnt-- */
754 * Make sure that the semaphore still exists
757 /* Check if another thread didn't remove the semaphore. */
758 auxsemaptr = get_semaptr(semid, 0, IPC_W); /* Redundant access check. */
759 if (!auxsemaptr) {
760 errno = EIDRM;
761 return (-1);
764 if (auxsemaptr != semaptr) {
765 errno = EIDRM;
766 goto done;
769 /* Check if another process didn't remove the semaphore. */
770 #ifdef SYSV_SEMS
771 if (try_rwlock_rdlock(semid, semaptr) == -1) {
772 #else
773 if (try_rwlock_wrlock(semid, semaptr) == -1) {
774 #endif
775 errno = EIDRM;
776 goto done;
778 sysv_print("semop: good morning (eval=%d)!\n", eval);
780 /* The semaphore is still alive. Readjust the count of
781 * waiting processes.
783 semptr = &semaptr->ds.sem_base[sopptr->sem_num];
784 #ifdef SYSV_SEMS
785 sysv_mutex_lock(&semptr->sem_mutex);
786 #endif
787 if (sopptr->sem_op == 0)
788 semptr->semzcnt--;
789 else
790 semptr->semncnt--;
791 #ifdef SYSV_SEMS
792 sysv_mutex_unlock(&semptr->sem_mutex);
793 #endif
796 * Is it really morning, or was our sleep interrupted?
797 * (Delayed check of tsleep() return code because we
798 * need to decrement sem[nz]cnt either way.)
800 * Always retry on EBUSY
802 if (eval == EAGAIN) {
803 eval = EINTR;
804 goto done;
807 sysv_print("semop: good morning!\n");
808 /* RETRY LOOP */
811 donex:
813 * Process any SEM_UNDO requests.
815 if (do_undos) {
816 for (i = 0; i < (int)nsops; i++) {
818 * We only need to deal with SEM_UNDO's for non-zero
819 * op's.
821 int adjval;
823 if ((sops[i].sem_flg & SEM_UNDO) == 0)
824 continue;
825 adjval = sops[i].sem_op;
826 if (adjval == 0)
827 continue;
828 eval = semundo_adjust(semid, sops[i].sem_num, -adjval);
829 if (eval == 0)
830 continue;
833 * Oh-Oh! We ran out of either sem_undo's or undo's.
834 * Rollback the adjustments to this point and then
835 * rollback the semaphore ups and down so we can return
836 * with an error with all structures restored. We
837 * rollback the undo's in the exact reverse order that
838 * we applied them. This guarantees that we won't run
839 * out of space as we roll things back out.
841 for (j = i - 1; j >= 0; j--) {
842 if ((sops[j].sem_flg & SEM_UNDO) == 0)
843 continue;
844 adjval = sops[j].sem_op;
845 if (adjval == 0)
846 continue;
847 if (semundo_adjust(semid, sops[j].sem_num,
848 adjval) != 0)
849 sysv_print("semop - can't undo undos");
852 for (j = 0; j < (int)nsops; j++) {
853 xsemptr = &semaptr->ds.sem_base[
854 sops[j].sem_num];
855 #ifdef SYSV_SEMS
856 sysv_mutex_lock(&semptr->sem_mutex);
857 #endif
858 xsemptr->semval -= sops[j].sem_op;
859 if (xsemptr->semval == 0 &&
860 xsemptr->semzcnt > 0)
861 umtx_wakeup((int *)&xsemptr->semval, 0);
862 if (xsemptr->semval <= 0 &&
863 xsemptr->semncnt > 0)
864 umtx_wakeup((int *)&xsemptr->semval, 0); //?!
865 #ifdef SYSV_SEMS
866 sysv_mutex_unlock(&semptr->sem_mutex);
867 #endif
870 sysv_print("eval = %d from semundo_adjust\n", eval);
871 goto done;
875 /* Set sempid field for each semaphore. */
876 for (i = 0; i < (int)nsops; i++) {
877 sopptr = &sops[i];
878 semptr = &semaptr->ds.sem_base[sopptr->sem_num];
879 #ifdef SYSV_SEMS
880 sysv_mutex_lock(&semptr->sem_mutex);
881 #endif
882 semptr->sempid = getpid();
883 #ifdef SYSV_SEMS
884 sysv_mutex_unlock(&semptr->sem_mutex);
885 #endif
888 sysv_print("semop: done\n");
889 semaptr->ds.sem_otime = time(NULL);
890 done:
891 rwlock_unlock(semid, semaptr);
892 done2:
893 put_shmdata(semid);
895 return (eval);