/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
 * This code underwent a massive rewrite in order to solve some problems
 * with the original code. In particular the original code failed to
 * wake up processes that were waiting for semval to go to 0 if the
 * value went to 0 and was then incremented rapidly enough. In solving
 * this problem I have also modified the implementation so that it
 * processes pending operations in a FIFO manner, thus giving a guarantee
 * that processes waiting for a lock on the semaphore won't starve
 * unless another locking process fails to unlock.
 * In addition the following two changes in behavior have been introduced:
 * - The original implementation of semop returned the value of the
 *   last semaphore element examined on success. This does not
 *   match the manual page specifications, and effectively
 *   allows the user to read the semaphore even if they do not
 *   have read permissions. The implementation now returns 0
 *   on success as stated in the manual page.
 * - There is some confusion over whether the set of undo adjustments
 *   to be performed at exit should be done in an atomic manner.
 *   That is, if we are attempting to decrement the semval should we queue
 *   up and wait until we can do so legally?
 *   The original implementation attempted to do this.
 *   The current implementation does not do so. This is because I don't
 *   think it is the right thing (TM) to do, and because I couldn't
 *   see a clean way to get the old behavior with the new design.
 *   The POSIX standard and SVID should be consulted to determine
 *   what behavior is mandated.
 *
 * Further notes on refinement (Christoph Rohland, December 1998):
 * - The POSIX standard says that the undo adjustments should simply be
 *   redone at exit. So the current implementation is O.K.
 * - The previous code had two flaws:
 *   1) It actively gave the semaphore to the next waiting process
 *      sleeping on the semaphore. Since this process did not have the
 *      cpu this led to many unnecessary context switches and bad
 *      performance. Now we only check which process should be able to
 *      get the semaphore and if this process wants to reduce some
 *      semaphore value we simply wake it up without doing the
 *      operation. So it has to try to get it later. Thus e.g. the
 *      running process may reacquire the semaphore during the current
 *      time slice. If it only waits for zero or increases the semaphore,
 *      we do the operation in advance and wake it up.
 *   2) It did not wake up all zero waiting processes. We try to do
 *      better but only get the semops right which only wait for zero or
 *      increase. If there are decrement operations in the operations
 *      array we do the same as before.
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 */
#include <linux/config.h>
#include <linux/malloc.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>

#include <asm/uaccess.h>

extern int ipcperms (struct ipc_perm *ipcp, short semflg);
static int newary (key_t, int, int);
static int findkey (key_t key);
static void freeary (int id);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
#endif

static struct semid_ds *semary[SEMMNI];
static int used_sems = 0, used_semids = 0;
static DECLARE_WAIT_QUEUE_HEAD(sem_lock);
static int max_semid = 0;

static unsigned short sem_seq = 0;
void __init sem_init (void)
{
	int i;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *ent;
#endif

	init_waitqueue_head(&sem_lock);
	used_sems = used_semids = max_semid = sem_seq = 0;
	for (i = 0; i < SEMMNI; i++)
		semary[i] = (struct semid_ds *) IPC_UNUSED;
#ifdef CONFIG_PROC_FS
	ent = create_proc_entry("sysvipc/sem", 0, 0);
	ent->read_proc = sysvipc_sem_read_proc;
#endif
	return;
}
static int findkey (key_t key)
{
	int id;
	struct semid_ds *sma;

	for (id = 0; id <= max_semid; id++) {
		while ((sma = semary[id]) == IPC_NOID)
			interruptible_sleep_on (&sem_lock);
		if (sma == IPC_UNUSED)
			continue;
		if (key == sma->sem_perm.key)
			return id;
	}
	return -1;
}
static int newary (key_t key, int nsems, int semflg)
{
	int id;
	struct semid_ds *sma;
	struct ipc_perm *ipcp;
	int size;

	if (!nsems)
		return -EINVAL;
	if (used_sems + nsems > SEMMNS)
		return -ENOSPC;
	for (id = 0; id < SEMMNI; id++)
		if (semary[id] == IPC_UNUSED) {
			semary[id] = (struct semid_ds *) IPC_NOID;
			goto found;
		}
	return -ENOSPC;
found:
	size = sizeof (*sma) + nsems * sizeof (struct sem);
	used_sems += nsems;
	sma = (struct semid_ds *) kmalloc (size, GFP_KERNEL);
	if (!sma) {
		semary[id] = (struct semid_ds *) IPC_UNUSED;
		used_sems -= nsems;
		wake_up (&sem_lock);
		return -ENOMEM;
	}
	memset (sma, 0, size);
	sma->sem_base = (struct sem *) &sma[1];
	ipcp = &sma->sem_perm;
	ipcp->mode = (semflg & S_IRWXUGO);
	ipcp->key = key;
	ipcp->cuid = ipcp->uid = current->euid;
	ipcp->gid = ipcp->cgid = current->egid;
	sma->sem_perm.seq = sem_seq;
	/* sma->sem_pending = NULL; */
	sma->sem_pending_last = &sma->sem_pending;
	/* sma->undo = NULL; */
	sma->sem_nsems = nsems;
	sma->sem_ctime = CURRENT_TIME;
	if (id > max_semid)
		max_semid = id;
	used_semids++;
	semary[id] = sma;
	wake_up (&sem_lock);
	return (unsigned int) sma->sem_perm.seq * SEMMNI + id;
}
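
/* Note on identifiers: the value handed back to user space encodes both
 * the slot in semary[] and that slot's sequence number:
 * id = seq * SEMMNI + slot. sys_semop() and sys_semctl() recover the slot
 * with semid % SEMMNI and reject stale identifiers whose sequence number
 * no longer matches (semid / SEMMNI) with -EIDRM.
 */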
asmlinkage int sys_semget (key_t key, int nsems, int semflg)
{
	int id, err = -EINVAL;
	struct semid_ds *sma;

	lock_kernel();
	if (nsems < 0 || nsems > SEMMSL)
		goto out;
	if (key == IPC_PRIVATE) {
		err = newary(key, nsems, semflg);
	} else if ((id = findkey (key)) == -1) {  /* key not used */
		if (!(semflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newary(key, nsems, semflg);
	} else if (semflg & IPC_CREAT && semflg & IPC_EXCL) {
		err = -EEXIST;
	} else {
		sma = semary[id];
		if (nsems > sma->sem_nsems)
			err = -EINVAL;
		else if (ipcperms(&sma->sem_perm, semflg))
			err = -EACCES;
		else
			err = (int) sma->sem_perm.seq * SEMMNI + id;
	}
out:
	unlock_kernel();
	return err;
}
/* Manage the doubly linked list sma->sem_pending as a FIFO:
 * insert new queue elements at the tail sma->sem_pending_last.
 */
static inline void append_to_queue (struct semid_ds * sma,
				    struct sem_queue * q)
{
	*(q->prev = sma->sem_pending_last) = q;
	*(sma->sem_pending_last = &q->next) = NULL;
}

static inline void prepend_to_queue (struct semid_ds * sma,
				     struct sem_queue * q)
{
	q->next = sma->sem_pending;
	*(q->prev = &sma->sem_pending) = q;
	if (q->next)
		q->next->prev = &q->next;
	else /* sma->sem_pending_last == &sma->sem_pending */
		sma->sem_pending_last = &q->next;
}

static inline void remove_from_queue (struct semid_ds * sma,
				      struct sem_queue * q)
{
	*(q->prev) = q->next;
	if (q->next)
		q->next->prev = q->prev;
	else /* sma->sem_pending_last == &q->next */
		sma->sem_pending_last = q->prev;
	q->prev = NULL; /* mark as removed */
}
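
/* Invariants kept by the three helpers above: q->prev always points at
 * the word that points to q (either sma->sem_pending itself or the next
 * field of the previous entry), and sma->sem_pending_last points at the
 * next field of the last entry (or back at sma->sem_pending when the
 * list is empty). An element with q->prev == NULL is not on the list.
 */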
/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if need to sleep, else return error code.
 */
static int try_atomic_semop (struct semid_ds * sma, struct sembuf * sops,
			     int nsops, struct sem_undo *un, int pid,
			     int do_undo)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;

		if (!sem_op && curr->semval)
			goto would_block;

		curr->sempid = (curr->sempid << 16) | pid;
		curr->semval += sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sem_op;

		if (curr->semval < 0)
			goto would_block;
		if (curr->semval > SEMVMX)
			goto out_of_range;
	}

	if (do_undo) {
		sop--;
		result = 0;
		goto undo;
	}

	sma->sem_otime = CURRENT_TIME;
	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	while (sop >= sops) {
		curr = sma->sem_base + sop->sem_num;
		curr->semval -= sop->sem_op;
		curr->sempid >>= 16;

		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sop->sem_op;
		sop--;
	}

	return result;
}
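
/* Note: while an operation is applied, the previous sempid is kept in the
 * upper 16 bits of curr->sempid so that the undo path can restore it with
 * a single shift; GETPID in sys_semctl() correspondingly masks the value
 * with 0xffff before returning it.
 */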
/* Go through the pending queue for the indicated semaphore
 * looking for tasks that can be completed.
 */
static void update_queue (struct semid_ds * sma)
{
	int error;
	struct sem_queue * q;

	for (q = sma->sem_pending; q; q = q->next) {

		if (q->status == 1)
			return; /* wait for other process */

		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid, q->alter);

		/* Does q->sleeper still need to sleep? */
		if (error <= 0) {
			/* Found one, wake it up */
			wake_up_interruptible(&q->sleeper);
			if (error == 0 && q->alter) {
				/* if q->alter, let the task itself retry */
				q->status = 1;
				return;
			}
			q->status = error;
			remove_from_queue(sma,q);
		}
	}
}
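
/* The q->status field is the wakeup protocol between update_queue() and
 * the sleeper in sys_semop(): 1 means "woken up, retry the operation
 * yourself" (used for altering operations), while 0 or a negative errno
 * means the operation has already been completed (or has failed) and the
 * entry has been removed from the pending list.
 */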
/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
 */
static int count_semncnt (struct semid_ds * sma, ushort semnum)
{
	int semncnt;
	struct sem_queue * q;

	semncnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}

static int count_semzcnt (struct semid_ds * sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue * q;

	semzcnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}
/* Free a semaphore set. */
static void freeary (int id)
{
	struct semid_ds *sma = semary[id];
	struct sem_undo *un;
	struct sem_queue *q;

	/* Invalidate this semaphore set */
	sma->sem_perm.seq++;
	sem_seq = (sem_seq+1) % ((unsigned)(1<<31)/SEMMNI); /* increment, but avoid overflow */
	used_sems -= sma->sem_nsems;
	if (id == max_semid)
		while (max_semid && (semary[--max_semid] == IPC_UNUSED));
	semary[id] = (struct semid_ds *) IPC_UNUSED;
	used_semids--;

	/* Invalidate the existing undo structures for this semaphore set.
	 * (They will be freed without any further action in sem_exit().)
	 */
	for (un = sma->undo; un; un = un->id_next)
		un->semid = -1;

	/* Wake up all pending processes and let them fail with EIDRM. */
	for (q = sma->sem_pending; q; q = q->next) {
		q->status = -EIDRM;
		q->prev = NULL;
		wake_up_interruptible(&q->sleeper); /* doesn't sleep! */
	}

	kfree(sma);
}
asmlinkage int sys_semctl (int semid, int semnum, int cmd, union semun arg)
{
	struct semid_ds *buf = NULL;
	struct semid_ds tbuf;
	int i, id, val = 0;
	struct semid_ds *sma;
	struct ipc_perm *ipcp;
	struct sem *curr = NULL;
	struct sem_undo *un;
	unsigned int nsems;
	ushort *array = NULL;
	ushort sem_io[SEMMSL];
	int err = -EINVAL;

	lock_kernel();
	if (semid < 0 || semnum < 0 || cmd < 0)
		goto out;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo, *tmp = arg.__buf;
		seminfo.semmni = SEMMNI;
		seminfo.semmns = SEMMNS;
		seminfo.semmsl = SEMMSL;
		seminfo.semopm = SEMOPM;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		seminfo.semusz = SEMUSZ;
		seminfo.semaem = SEMAEM;
		if (cmd == SEM_INFO) {
			seminfo.semusz = used_semids;
			seminfo.semaem = used_sems;
		}
		err = -EFAULT;
		if (copy_to_user (tmp, &seminfo, sizeof(struct seminfo)))
			goto out;
		err = max_semid;
		goto out;
	}

	case SEM_STAT:
		buf = arg.buf;
		err = -EINVAL;
		if (semid > max_semid)
			goto out;
		sma = semary[semid];
		if (sma == IPC_UNUSED || sma == IPC_NOID)
			goto out;
		err = -EACCES;
		if (ipcperms (&sma->sem_perm, S_IRUGO))
			goto out;
		id = (unsigned int) sma->sem_perm.seq * SEMMNI + semid;
		tbuf.sem_perm = sma->sem_perm;
		tbuf.sem_otime = sma->sem_otime;
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		err = -EFAULT;
		if (copy_to_user (buf, &tbuf, sizeof(*buf)) == 0)
			err = id;
		goto out;
	}

	id = (unsigned int) semid % SEMMNI;
	sma = semary [id];
	err = -EINVAL;
	if (sma == IPC_UNUSED || sma == IPC_NOID)
		goto out;
	ipcp = &sma->sem_perm;
	nsems = sma->sem_nsems;
	err = -EIDRM;
	if (sma->sem_perm.seq != (unsigned int) semid / SEMMNI)
		goto out;

	switch (cmd) {
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETVAL:
		err = -EINVAL;
		if (semnum >= nsems)
			goto out;
		curr = &sma->sem_base[semnum];
		break;
	}

	switch (cmd) {
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case GETALL:
		err = -EACCES;
		if (ipcperms (ipcp, S_IRUGO))
			goto out;
		switch (cmd) {
		case GETVAL : err = curr->semval; goto out;
		case GETPID : err = curr->sempid & 0xffff; goto out;
		case GETNCNT: err = count_semncnt(sma,semnum); goto out;
		case GETZCNT: err = count_semzcnt(sma,semnum); goto out;
		case GETALL:
			array = arg.array;
			break;
		}
		break;
	case SETVAL:
		val = arg.val;
		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out;
		break;
	case IPC_RMID:
		if (current->euid == ipcp->cuid ||
		    current->euid == ipcp->uid || capable(CAP_SYS_ADMIN)) {
			freeary (id);
			err = 0;
			goto out;
		}
		err = -EPERM;
		goto out;
	case SETALL: /* arg is a pointer to an array of ushort */
		array = arg.array;
		err = -EFAULT;
		if (copy_from_user (sem_io, array, nsems*sizeof(ushort)))
			goto out;
		err = 0;
		for (i = 0; i < nsems; i++)
			if (sem_io[i] > SEMVMX) {
				err = -ERANGE;
				goto out;
			}
		break;
	case IPC_STAT:
		buf = arg.buf;
		break;
	case IPC_SET:
		buf = arg.buf;
		err = copy_from_user (&tbuf, buf, sizeof (*buf));
		if (err)
			err = -EFAULT;
		break;
	}

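	/* Commands such as IPC_SET and SETALL copy data from user space
	 * above and may block while doing so; re-check that the semaphore
	 * set still exists before operating on it below. */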
	err = -EIDRM;
	if (semary[id] == IPC_UNUSED || semary[id] == IPC_NOID)
		goto out;
	if (sma->sem_perm.seq != (unsigned int) semid / SEMMNI)
		goto out;

	switch (cmd) {
	case GETALL:
		err = -EACCES;
		if (ipcperms (ipcp, S_IRUGO))
			goto out;
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		if (copy_to_user (array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		break;
	case SETVAL:
		err = -EACCES;
		if (ipcperms (ipcp, S_IWUGO))
			goto out;
		for (un = sma->undo; un; un = un->id_next)
			un->semadj[semnum] = 0;
		curr->semval = val;
		sma->sem_ctime = CURRENT_TIME;
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		break;
	case IPC_SET:
		if (current->euid == ipcp->cuid ||
		    current->euid == ipcp->uid || capable(CAP_SYS_ADMIN)) {
			ipcp->uid = tbuf.sem_perm.uid;
			ipcp->gid = tbuf.sem_perm.gid;
			ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
				| (tbuf.sem_perm.mode & S_IRWXUGO);
			sma->sem_ctime = CURRENT_TIME;
			err = 0;
			goto out;
		}
		err = -EPERM;
		goto out;
	case IPC_STAT:
		err = -EACCES;
		if (ipcperms (ipcp, S_IRUGO))
			goto out;
		tbuf.sem_perm = sma->sem_perm;
		tbuf.sem_otime = sma->sem_otime;
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		if (copy_to_user (buf, &tbuf, sizeof(*buf)))
			err = -EFAULT;
		break;
	case SETALL:
		err = -EACCES;
		if (ipcperms (ipcp, S_IWUGO))
			goto out;
		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];
		for (un = sma->undo; un; un = un->id_next)
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		sma->sem_ctime = CURRENT_TIME;
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	err = 0;
out:
	unlock_kernel();
	return err;
}
asmlinkage int sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
{
	int id, size, error = -EINVAL;
	struct semid_ds *sma;
	struct sembuf sops[SEMOPM], *sop;
	struct sem_undo *un;
	int undos = 0, decrease = 0, alter = 0;
	struct sem_queue queue;

	lock_kernel();
	if (nsops < 1 || semid < 0)
		goto out;
	error = -E2BIG;
	if (nsops > SEMOPM)
		goto out;
	error = -EFAULT;
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops)))
		goto out;
	id = (unsigned int) semid % SEMMNI;
	error = -EINVAL;
	if ((sma = semary[id]) == IPC_UNUSED || sma == IPC_NOID)
		goto out;
	error = -EIDRM;
	if (sma->sem_perm.seq != (unsigned int) semid / SEMMNI)
		goto out;

	error = -EFBIG;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= sma->sem_nsems)
			goto out;
		if (sop->sem_flg & SEM_UNDO)
			undos++;
		if (sop->sem_op < 0)
			decrease = 1;
		if (sop->sem_op > 0)
			alter = 1;
	}
	alter |= decrease;

	error = -EACCES;
	if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out;
	if (undos) {
		/* Make sure we have an undo structure
		 * for this process and this semaphore set.
		 */
		for (un = current->semundo; un; un = un->proc_next)
			if (un->semid == semid)
				break;
		if (!un) {
			size = sizeof(struct sem_undo) + sizeof(short)*sma->sem_nsems;
			un = (struct sem_undo *) kmalloc(size, GFP_ATOMIC);
			if (!un) {
				error = -ENOMEM;
				goto out;
			}
			memset(un, 0, size);
			un->semadj = (short *) &un[1];
			un->semid = semid;
			un->proc_next = current->semundo;
			current->semundo = un;
			un->id_next = sma->undo;
			sma->undo = un;
		}
	} else
		un = NULL;

	error = try_atomic_semop (sma, sops, nsops, un, current->pid, 0);
	if (error <= 0)
		goto update;

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */
	queue.sma = sma;
	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = current->pid;
	queue.alter = decrease;
	current->semsleeping = &queue;
	if (alter)
		append_to_queue(sma, &queue);
	else
		prepend_to_queue(sma, &queue);

	for (;;) {
		queue.status = -EINTR;
		init_waitqueue_head(&queue.sleeper);
		interruptible_sleep_on(&queue.sleeper);

		/*
		 * If queue.status == 1 we were woken up and
		 * have to retry, else we simply return.
		 * If an interrupt occurred we have to clean up the
		 * queue.
		 */
		if (queue.status == 1) {
			error = try_atomic_semop (sma, sops, nsops, un,
						  current->pid, 0);
			if (error <= 0)
				break;
		} else {
			error = queue.status;
			if (queue.prev) /* got interrupt */
				break;
			/* Everything done by update_queue */
			current->semsleeping = NULL;
			goto out;
		}
	}
	current->semsleeping = NULL;
	remove_from_queue(sma,&queue);
update:
	if (alter)
		update_queue (sma);
out:
	unlock_kernel();
	return error;
}
/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void sem_exit (void)
{
	struct sem_queue *q;
	struct sem_undo *u, *un = NULL, **up, **unp;
	struct semid_ds *sma;
	int nsems, i;

	/* If the current process was sleeping for a semaphore,
	 * remove it from the queue.
	 */
	if ((q = current->semsleeping)) {
		if (q->prev)
			remove_from_queue(q->sma,q);
		current->semsleeping = NULL;
	}

	for (up = &current->semundo; (u = *up); *up = u->proc_next, kfree(u)) {
		if (u->semid == -1)
			continue;
		sma = semary[(unsigned int) u->semid % SEMMNI];
		if (sma == IPC_UNUSED || sma == IPC_NOID)
			continue;
		if (sma->sem_perm.seq != (unsigned int) u->semid / SEMMNI)
			continue;
		/* remove u from the sma->undo list */
		for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
			if (u == un)
				goto found;
		}
		printk ("sem_exit undo list error id=%d\n", u->semid);
		break;
found:
		*unp = un->id_next;
		/* perform adjustments registered in u */
		nsems = sma->sem_nsems;
		for (i = 0; i < nsems; i++) {
			struct sem * sem = &sma->sem_base[i];
			sem->semval += u->semadj[i];
			if (sem->semval < 0)
				sem->semval = 0; /* shouldn't happen */
			sem->sempid = current->pid;
		}
		sma->sem_otime = CURRENT_TIME;
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
	}
	current->semundo = NULL;
}
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t begin = 0;
	int i, len = 0;

	len += sprintf(buffer, "       key      semid perms nsems   uid   gid  cuid  cgid      otime      ctime\n");

	for(i = 0; i < SEMMNI; i++)
		if(semary[i] != IPC_UNUSED) {
			len += sprintf(buffer + len, "%10d %10d %4o %5u %5u %5u %5u %5u %10lu %10lu\n",
				semary[i]->sem_perm.key,
				semary[i]->sem_perm.seq * SEMMNI + i,
				semary[i]->sem_perm.mode,
				semary[i]->sem_nsems,
				semary[i]->sem_perm.uid,
				semary[i]->sem_perm.gid,
				semary[i]->sem_perm.cuid,
				semary[i]->sem_perm.cgid,
				semary[i]->sem_otime,
				semary[i]->sem_ctime);

			pos += len;
			if(pos < offset) {
				len = 0;
				begin = pos;
			}
			if(pos > offset + length)
				goto done;
		}
	*eof = 1;
done:
	*start = buffer + (offset - begin);
	len -= (offset - begin);
	if(len > length)
		len = length;
	if(len < 0)
		len = 0;
	return len;
}
#endif