3 * Copyright (C) 1992 Krishna Balasubramanian
4 * Copyright (C) 1995 Eric Schenk, Bruno Haible
6 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
7 * This code underwent a massive rewrite in order to solve some problems
8 * with the original code. In particular the original code failed to
9 * wake up processes that were waiting for semval to go to 0 if the
10 * value went to 0 and was then incremented rapidly enough. In solving
11 * this problem I have also modified the implementation so that it
12 * processes pending operations in a FIFO manner, thus giving a guarantee
13 * that processes waiting for a lock on the semaphore won't starve
14 * unless another locking process fails to unlock.
15 * In addition the following two changes in behavior have been introduced:
16 * - The original implementation of semop returned the value
17 * last semaphore element examined on success. This does not
18 * match the manual page specifications, and effectively
19 * allows the user to read the semaphore even if they do not
20 * have read permissions. The implementation now returns 0
21 * on success as stated in the manual page.
22 * - There is some confusion over whether the set of undo adjustments
23 * to be performed at exit should be done in an atomic manner.
24 * That is, if we are attempting to decrement the semval should we queue
25 * up and wait until we can do so legally?
26 * The original implementation attempted to do this.
27 * The current implementation does not do so. This is because I don't
28 * think it is the right thing (TM) to do, and because I couldn't
29 * see a clean way to get the old behavior with the new design.
30 * The POSIX standard and SVID should be consulted to determine
31 * what behavior is mandated.
33 * Further notes on refinement (Christoph Rohland, December 1998):
34 * - The POSIX standard says, that the undo adjustments simply should
35 * redo. So the current implementation is o.K.
36 * - The previous code had two flaws:
37 * 1) It actively gave the semaphore to the next waiting process
38 * sleeping on the semaphore. Since this process did not have the
39 * cpu this led to many unnecessary context switches and bad
40 * performance. Now we only check which process should be able to
41 * get the semaphore and if this process wants to reduce some
42 * semaphore value we simply wake it up without doing the
43 * operation. So it has to try to get it later. Thus e.g. the
44 * running process may reacquire the semaphore during the current
45 * time slice. If it only waits for zero or increases the semaphore,
46 * we do the operation in advance and wake it up.
47 * 2) It did not wake up all zero waiting processes. We try to do
48 * better but only get the semops right which only wait for zero or
49 * increase. If there are decrement operations in the operations
50 * array we do the same as before.
52 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
54 * SMP-threaded, sysctl's added
55 * (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
58 #include <linux/config.h>
59 #include <linux/malloc.h>
60 #include <linux/spinlock.h>
61 #include <linux/init.h>
62 #include <linux/proc_fs.h>
63 #include <asm/uaccess.h>
/*
 * Wrappers that specialise the generic ipc_* primitives to the
 * semaphore id table sem_ids.
 */
#define sem_lock(id)	((struct sem_array*)ipc_lock(&sem_ids,id))
#define sem_unlock(id)	ipc_unlock(&sem_ids,id)
#define sem_rmid(id)	((struct sem_array*)ipc_rmid(&sem_ids,id))
#define sem_checkid(sma, semid)	\
	ipc_checkid(&sem_ids,&sma->sem_perm,semid)
#define sem_buildid(id, seq) \
	ipc_buildid(&sem_ids, id, seq)
74 static struct ipc_ids sem_ids
;
76 static int newary (key_t
, int, int);
77 static void freeary (int id
);
79 static int sysvipc_sem_read_proc(char *buffer
, char **start
, off_t offset
, int length
, int *eof
, void *data
);
82 #define SEMMSL_FAST 256 /* 512 bytes on stack */
83 #define SEMOPM_FAST 64 /* ~ 372 bytes on stack */
86 * linked list protection:
88 * sem_array.sem_pending{,last},
89 * sem_array.sem_undo: sem_lock() for read/write
90 * sem_undo.proc_next: only "current" is allowed to read/write that field.
94 int sem_ctls
[4] = {SEMMSL
, SEMMNS
, SEMOPM
, SEMMNI
};
95 #define sc_semmsl (sem_ctls[0])
96 #define sc_semmns (sem_ctls[1])
97 #define sc_semopm (sem_ctls[2])
98 #define sc_semmni (sem_ctls[3])
100 static int used_sems
;
102 void __init
sem_init (void)
105 ipc_init_ids(&sem_ids
,sc_semmni
);
107 #ifdef CONFIG_PROC_FS
108 create_proc_read_entry("sysvipc/sem", 0, 0, sysvipc_sem_read_proc
, NULL
);
112 static int newary (key_t key
, int nsems
, int semflg
)
115 struct sem_array
*sma
;
120 if (used_sems
+ nsems
> sc_semmns
)
123 size
= sizeof (*sma
) + nsems
* sizeof (struct sem
);
124 sma
= (struct sem_array
*) ipc_alloc(size
);
128 memset (sma
, 0, size
);
129 id
= ipc_addid(&sem_ids
, &sma
->sem_perm
, sc_semmni
);
136 sma
->sem_perm
.mode
= (semflg
& S_IRWXUGO
);
137 sma
->sem_perm
.key
= key
;
139 sma
->sem_base
= (struct sem
*) &sma
[1];
140 /* sma->sem_pending = NULL; */
141 sma
->sem_pending_last
= &sma
->sem_pending
;
142 /* sma->undo = NULL; */
143 sma
->sem_nsems
= nsems
;
144 sma
->sem_ctime
= CURRENT_TIME
;
147 return sem_buildid(id
, sma
->sem_perm
.seq
);
150 asmlinkage
long sys_semget (key_t key
, int nsems
, int semflg
)
152 int id
, err
= -EINVAL
;
153 struct sem_array
*sma
;
155 if (nsems
< 0 || nsems
> sc_semmsl
)
159 if (key
== IPC_PRIVATE
) {
160 err
= newary(key
, nsems
, semflg
);
161 } else if ((id
= ipc_findkey(&sem_ids
, key
)) == -1) { /* key not used */
162 if (!(semflg
& IPC_CREAT
))
165 err
= newary(key
, nsems
, semflg
);
166 } else if (semflg
& IPC_CREAT
&& semflg
& IPC_EXCL
) {
172 if (nsems
> sma
->sem_nsems
)
174 else if (ipcperms(&sma
->sem_perm
, semflg
))
177 err
= sem_buildid(id
, sma
->sem_perm
.seq
);
185 /* doesn't acquire the sem_lock on error! */
186 static int sem_revalidate(int semid
, struct sem_array
* sma
, int nsems
, short flg
)
188 struct sem_array
* smanew
;
190 smanew
= sem_lock(semid
);
193 if(smanew
!= sma
|| sem_checkid(sma
,semid
) || sma
->sem_nsems
!= nsems
) {
198 if (ipcperms(&sma
->sem_perm
, flg
)) {
204 /* Manage the doubly linked list sma->sem_pending as a FIFO:
205 * insert new queue elements at the tail sma->sem_pending_last.
207 static inline void append_to_queue (struct sem_array
* sma
,
208 struct sem_queue
* q
)
210 *(q
->prev
= sma
->sem_pending_last
) = q
;
211 *(sma
->sem_pending_last
= &q
->next
) = NULL
;
214 static inline void prepend_to_queue (struct sem_array
* sma
,
215 struct sem_queue
* q
)
217 q
->next
= sma
->sem_pending
;
218 *(q
->prev
= &sma
->sem_pending
) = q
;
220 q
->next
->prev
= &q
->next
;
221 else /* sma->sem_pending_last == &sma->sem_pending */
222 sma
->sem_pending_last
= &q
->next
;
225 static inline void remove_from_queue (struct sem_array
* sma
,
226 struct sem_queue
* q
)
228 *(q
->prev
) = q
->next
;
230 q
->next
->prev
= q
->prev
;
231 else /* sma->sem_pending_last == &q->next */
232 sma
->sem_pending_last
= q
->prev
;
233 q
->prev
= NULL
; /* mark as removed */
237 * Determine whether a sequence of semaphore operations would succeed
238 * all at once. Return 0 if yes, 1 if need to sleep, else return error code.
241 static int try_atomic_semop (struct sem_array
* sma
, struct sembuf
* sops
,
242 int nsops
, struct sem_undo
*un
, int pid
,
249 for (sop
= sops
; sop
< sops
+ nsops
; sop
++) {
250 curr
= sma
->sem_base
+ sop
->sem_num
;
251 sem_op
= sop
->sem_op
;
253 if (!sem_op
&& curr
->semval
)
256 curr
->sempid
= (curr
->sempid
<< 16) | pid
;
257 curr
->semval
+= sem_op
;
258 if (sop
->sem_flg
& SEM_UNDO
)
259 un
->semadj
[sop
->sem_num
] -= sem_op
;
261 if (curr
->semval
< 0)
263 if (curr
->semval
> SEMVMX
)
274 sma
->sem_otime
= CURRENT_TIME
;
282 if (sop
->sem_flg
& IPC_NOWAIT
)
288 while (sop
>= sops
) {
289 curr
= sma
->sem_base
+ sop
->sem_num
;
290 curr
->semval
-= sop
->sem_op
;
293 if (sop
->sem_flg
& SEM_UNDO
)
294 un
->semadj
[sop
->sem_num
] += sop
->sem_op
;
301 /* Go through the pending queue for the indicated semaphore
302 * looking for tasks that can be completed.
304 static void update_queue (struct sem_array
* sma
)
307 struct sem_queue
* q
;
309 for (q
= sma
->sem_pending
; q
; q
= q
->next
) {
312 continue; /* this one was woken up before */
314 error
= try_atomic_semop(sma
, q
->sops
, q
->nsops
,
315 q
->undo
, q
->pid
, q
->alter
);
317 /* Does q->sleeper still need to sleep? */
319 /* Found one, wake it up */
320 wake_up_process(q
->sleeper
);
321 if (error
== 0 && q
->alter
) {
322 /* if q-> alter let it self try */
327 remove_from_queue(sma
,q
);
332 /* The following counts are associated to each semaphore:
333 * semncnt number of tasks waiting on semval being nonzero
334 * semzcnt number of tasks waiting on semval being zero
335 * This model assumes that a task waits on exactly one semaphore.
336 * Since semaphore operations are to be performed atomically, tasks actually
337 * wait on a whole sequence of semaphores simultaneously.
338 * The counts we return here are a rough approximation, but still
339 * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
341 static int count_semncnt (struct sem_array
* sma
, ushort semnum
)
344 struct sem_queue
* q
;
347 for (q
= sma
->sem_pending
; q
; q
= q
->next
) {
348 struct sembuf
* sops
= q
->sops
;
349 int nsops
= q
->nsops
;
351 for (i
= 0; i
< nsops
; i
++)
352 if (sops
[i
].sem_num
== semnum
353 && (sops
[i
].sem_op
< 0)
354 && !(sops
[i
].sem_flg
& IPC_NOWAIT
))
359 static int count_semzcnt (struct sem_array
* sma
, ushort semnum
)
362 struct sem_queue
* q
;
365 for (q
= sma
->sem_pending
; q
; q
= q
->next
) {
366 struct sembuf
* sops
= q
->sops
;
367 int nsops
= q
->nsops
;
369 for (i
= 0; i
< nsops
; i
++)
370 if (sops
[i
].sem_num
== semnum
371 && (sops
[i
].sem_op
== 0)
372 && !(sops
[i
].sem_flg
& IPC_NOWAIT
))
378 /* Free a semaphore set. */
379 static void freeary (int id
)
381 struct sem_array
*sma
;
388 /* Invalidate the existing undo structures for this semaphore set.
389 * (They will be freed without any further action in sem_exit()
390 * or during the next semop.)
392 for (un
= sma
->undo
; un
; un
= un
->id_next
)
395 /* Wake up all pending processes and let them fail with EIDRM. */
396 for (q
= sma
->sem_pending
; q
; q
= q
->next
) {
399 wake_up_process(q
->sleeper
); /* doesn't sleep */
403 used_sems
-= sma
->sem_nsems
;
404 size
= sizeof (*sma
) + sma
->sem_nsems
* sizeof (struct sem
);
408 static unsigned long copy_semid_to_user(void *buf
, struct semid64_ds
*in
, int version
)
412 return copy_to_user(buf
, in
, sizeof(*in
));
417 ipc64_perm_to_ipc_perm(&in
->sem_perm
, &out
.sem_perm
);
419 out
.sem_otime
= in
->sem_otime
;
420 out
.sem_ctime
= in
->sem_ctime
;
421 out
.sem_nsems
= in
->sem_nsems
;
423 return copy_to_user(buf
, &out
, sizeof(out
));
430 int semctl_nolock(int semid
, int semnum
, int cmd
, int version
, union semun arg
)
438 struct seminfo seminfo
;
441 memset(&seminfo
,0,sizeof(seminfo
));
442 seminfo
.semmni
= sc_semmni
;
443 seminfo
.semmns
= sc_semmns
;
444 seminfo
.semmsl
= sc_semmsl
;
445 seminfo
.semopm
= sc_semopm
;
446 seminfo
.semvmx
= SEMVMX
;
447 seminfo
.semmnu
= SEMMNU
;
448 seminfo
.semmap
= SEMMAP
;
449 seminfo
.semume
= SEMUME
;
451 if (cmd
== SEM_INFO
) {
452 seminfo
.semusz
= sem_ids
.in_use
;
453 seminfo
.semaem
= used_sems
;
455 seminfo
.semusz
= SEMUSZ
;
456 seminfo
.semaem
= SEMAEM
;
458 max_id
= sem_ids
.max_id
;
460 if (copy_to_user (arg
.__buf
, &seminfo
, sizeof(struct seminfo
)))
462 return (max_id
< 0) ? 0: max_id
;
466 struct sem_array
*sma
;
467 struct semid64_ds tbuf
;
470 if(semid
> sem_ids
.size
)
473 memset(&tbuf
,0,sizeof(tbuf
));
475 sma
= sem_lock(semid
);
480 if (ipcperms (&sma
->sem_perm
, S_IRUGO
))
482 id
= sem_buildid(semid
, sma
->sem_perm
.seq
);
484 kernel_to_ipc64_perm(&sma
->sem_perm
, &tbuf
.sem_perm
);
485 tbuf
.sem_otime
= sma
->sem_otime
;
486 tbuf
.sem_ctime
= sma
->sem_ctime
;
487 tbuf
.sem_nsems
= sma
->sem_nsems
;
489 if (copy_semid_to_user (arg
.buf
, &tbuf
, version
))
502 int semctl_main(int semid
, int semnum
, int cmd
, int version
, union semun arg
)
504 struct sem_array
*sma
;
507 ushort fast_sem_io
[SEMMSL_FAST
];
508 ushort
* sem_io
= fast_sem_io
;
511 sma
= sem_lock(semid
);
515 nsems
= sma
->sem_nsems
;
518 if (sem_checkid(sma
,semid
))
522 if (ipcperms (&sma
->sem_perm
, (cmd
==SETVAL
||cmd
==SETALL
)?S_IWUGO
:S_IRUGO
))
528 ushort
*array
= arg
.array
;
531 if(nsems
> SEMMSL_FAST
) {
533 sem_io
= ipc_alloc(sizeof(ushort
)*nsems
);
536 err
= sem_revalidate(semid
, sma
, nsems
, S_IRUGO
);
541 for (i
= 0; i
< sma
->sem_nsems
; i
++)
542 sem_io
[i
] = sma
->sem_base
[i
].semval
;
545 if(copy_to_user(array
, sem_io
, nsems
*sizeof(ushort
)))
556 if(nsems
> SEMMSL_FAST
) {
557 sem_io
= ipc_alloc(sizeof(ushort
)*nsems
);
562 if (copy_from_user (sem_io
, arg
.array
, nsems
*sizeof(ushort
))) {
567 for (i
= 0; i
< nsems
; i
++) {
568 if (sem_io
[i
] > SEMVMX
) {
573 err
= sem_revalidate(semid
, sma
, nsems
, S_IWUGO
);
577 for (i
= 0; i
< nsems
; i
++)
578 sma
->sem_base
[i
].semval
= sem_io
[i
];
579 for (un
= sma
->undo
; un
; un
= un
->id_next
)
580 for (i
= 0; i
< nsems
; i
++)
582 sma
->sem_ctime
= CURRENT_TIME
;
583 /* maybe some queued-up processes were waiting for this */
590 struct semid64_ds tbuf
;
591 memset(&tbuf
,0,sizeof(tbuf
));
592 kernel_to_ipc64_perm(&sma
->sem_perm
, &tbuf
.sem_perm
);
593 tbuf
.sem_otime
= sma
->sem_otime
;
594 tbuf
.sem_ctime
= sma
->sem_ctime
;
595 tbuf
.sem_nsems
= sma
->sem_nsems
;
597 if (copy_semid_to_user (arg
.buf
, &tbuf
, version
))
601 /* GETVAL, GETPID, GETNCTN, GETZCNT, SETVAL: fall-through */
604 if(semnum
< 0 || semnum
>= nsems
)
607 curr
= &sma
->sem_base
[semnum
];
614 err
= curr
->sempid
& 0xffff;
617 err
= count_semncnt(sma
,semnum
);
620 err
= count_semzcnt(sma
,semnum
);
627 if (val
> SEMVMX
|| val
< 0)
630 for (un
= sma
->undo
; un
; un
= un
->id_next
)
631 un
->semadj
[semnum
] = 0;
633 sma
->sem_ctime
= CURRENT_TIME
;
634 /* maybe some queued-up processes were waiting for this */
643 if(sem_io
!= fast_sem_io
)
644 ipc_free(sem_io
, sizeof(ushort
)*nsems
);
654 static inline unsigned long copy_semid_from_user(struct sem_setbuf
*out
, void *buf
, int version
)
659 struct semid64_ds tbuf
;
661 if(copy_from_user(&tbuf
, buf
, sizeof(tbuf
)))
664 out
->uid
= tbuf
.sem_perm
.uid
;
665 out
->gid
= tbuf
.sem_perm
.gid
;
666 out
->mode
= tbuf
.sem_perm
.mode
;
672 struct semid_ds tbuf_old
;
674 if(copy_from_user(&tbuf_old
, buf
, sizeof(tbuf_old
)))
677 out
->uid
= tbuf_old
.sem_perm
.uid
;
678 out
->gid
= tbuf_old
.sem_perm
.gid
;
679 out
->mode
= tbuf_old
.sem_perm
.mode
;
688 int semctl_down(int semid
, int semnum
, int cmd
, int version
, union semun arg
)
690 struct sem_array
*sma
;
692 struct sem_setbuf setbuf
;
693 struct kern_ipc_perm
*ipcp
;
696 if(copy_semid_from_user (&setbuf
, arg
.buf
, version
))
699 sma
= sem_lock(semid
);
703 if (sem_checkid(sma
,semid
)) {
707 ipcp
= &sma
->sem_perm
;
709 if (current
->euid
!= ipcp
->cuid
&&
710 current
->euid
!= ipcp
->uid
&& !capable(CAP_SYS_ADMIN
)) {
721 ipcp
->uid
= setbuf
.uid
;
722 ipcp
->gid
= setbuf
.gid
;
723 ipcp
->mode
= (ipcp
->mode
& ~S_IRWXUGO
)
724 | (setbuf
.mode
& S_IRWXUGO
);
725 sma
->sem_ctime
= CURRENT_TIME
;
741 asmlinkage
long sys_semctl (int semid
, int semnum
, int cmd
, union semun arg
)
749 version
= ipc_parse_version(&cmd
);
755 err
= semctl_nolock(semid
,semnum
,cmd
,version
,arg
);
765 err
= semctl_main(semid
,semnum
,cmd
,version
,arg
);
770 err
= semctl_down(semid
,semnum
,cmd
,version
,arg
);
778 static struct sem_undo
* freeundos(struct sem_array
*sma
, struct sem_undo
* un
)
781 struct sem_undo
** up
;
783 for(up
= ¤t
->semundo
;(u
=*up
);up
=&u
->proc_next
) {
791 printk ("freeundos undo list error id=%d\n", un
->semid
);
792 return un
->proc_next
;
795 /* returns without sem_lock on error! */
796 static int alloc_undo(struct sem_array
*sma
, struct sem_undo
** unp
, int semid
, int alter
)
798 int size
, nsems
, error
;
801 nsems
= sma
->sem_nsems
;
802 size
= sizeof(struct sem_undo
) + sizeof(short)*nsems
;
805 un
= (struct sem_undo
*) kmalloc(size
, GFP_KERNEL
);
810 error
= sem_revalidate(semid
, sma
, nsems
, alter
? S_IWUGO
: S_IRUGO
);
816 un
->semadj
= (short *) &un
[1];
818 un
->proc_next
= current
->semundo
;
819 current
->semundo
= un
;
820 un
->id_next
= sma
->undo
;
826 asmlinkage
long sys_semop (int semid
, struct sembuf
*tsops
, unsigned nsops
)
829 struct sem_array
*sma
;
830 struct sembuf fast_sops
[SEMOPM_FAST
];
831 struct sembuf
* sops
= fast_sops
, *sop
;
833 int undos
= 0, decrease
= 0, alter
= 0;
834 struct sem_queue queue
;
836 if (nsops
< 1 || semid
< 0)
838 if (nsops
> sc_semopm
)
840 if(nsops
> SEMOPM_FAST
) {
841 sops
= kmalloc(sizeof(*sops
)*nsops
,GFP_KERNEL
);
845 if (copy_from_user (sops
, tsops
, nsops
* sizeof(*tsops
))) {
849 sma
= sem_lock(semid
);
854 if (sem_checkid(sma
,semid
))
855 goto out_unlock_free
;
857 for (sop
= sops
; sop
< sops
+ nsops
; sop
++) {
858 if (sop
->sem_num
>= sma
->sem_nsems
)
859 goto out_unlock_free
;
860 if (sop
->sem_flg
& SEM_UNDO
)
870 if (ipcperms(&sma
->sem_perm
, alter
? S_IWUGO
: S_IRUGO
))
871 goto out_unlock_free
;
873 /* Make sure we have an undo structure
874 * for this process and this semaphore set.
881 un
=freeundos(sma
,un
);
886 error
= alloc_undo(sma
,&un
,semid
,alter
);
893 error
= try_atomic_semop (sma
, sops
, nsops
, un
, current
->pid
, 0);
897 /* We need to sleep on this operation, so we put the current
898 * task into the pending queue and go to sleep.
905 queue
.pid
= current
->pid
;
906 queue
.alter
= decrease
;
909 append_to_queue(sma
,&queue
);
911 prepend_to_queue(sma
,&queue
);
912 current
->semsleeping
= &queue
;
915 struct sem_array
* tmp
;
916 queue
.status
= -EINTR
;
917 queue
.sleeper
= current
;
918 current
->state
= TASK_INTERRUPTIBLE
;
923 tmp
= sem_lock(semid
);
925 if(queue
.status
!= -EIDRM
)
927 current
->semsleeping
= NULL
;
932 * If queue.status == 1 we where woken up and
933 * have to retry else we simply return.
934 * If an interrupt occurred we have to clean up the
938 if (queue
.status
== 1)
940 error
= try_atomic_semop (sma
, sops
, nsops
, un
,
945 error
= queue
.status
;
946 if (queue
.prev
) /* got Interrupt */
948 /* Everything done by update_queue */
949 current
->semsleeping
= NULL
;
950 goto out_unlock_free
;
953 current
->semsleeping
= NULL
;
954 remove_from_queue(sma
,&queue
);
961 if(sops
!= fast_sops
)
967 * add semadj values to semaphores, free undo structures.
968 * undo structures are not freed when semaphore arrays are destroyed
969 * so some of them may be out of date.
970 * IMPLEMENTATION NOTE: There is some confusion over whether the
971 * set of adjustments that needs to be done should be done in an atomic
972 * manner or not. That is, if we are attempting to decrement the semval
973 * should we queue up and wait until we can do so legally?
974 * The original implementation attempted to do this (queue and wait).
975 * The current implementation does not do so. The POSIX standard
976 * and SVID should be consulted to determine what behavior is mandated.
981 struct sem_undo
*u
, *un
= NULL
, **up
, **unp
;
982 struct sem_array
*sma
;
985 /* If the current process was sleeping for a semaphore,
986 * remove it from the queue.
988 if ((q
= current
->semsleeping
)) {
990 sma
= sem_lock(semid
);
991 current
->semsleeping
= NULL
;
996 remove_from_queue(q
->sma
,q
);
1002 for (up
= ¤t
->semundo
; (u
= *up
); *up
= u
->proc_next
, kfree(u
)) {
1003 int semid
= u
->semid
;
1006 sma
= sem_lock(semid
);
1013 if (sem_checkid(sma
,u
->semid
))
1016 /* remove u from the sma->undo list */
1017 for (unp
= &sma
->undo
; (un
= *unp
); unp
= &un
->id_next
) {
1021 printk ("sem_exit undo list error id=%d\n", u
->semid
);
1025 /* perform adjustments registered in u */
1026 nsems
= sma
->sem_nsems
;
1027 for (i
= 0; i
< nsems
; i
++) {
1028 struct sem
* sem
= &sma
->sem_base
[i
];
1029 sem
->semval
+= u
->semadj
[i
];
1030 if (sem
->semval
< 0)
1031 sem
->semval
= 0; /* shouldn't happen */
1032 sem
->sempid
= current
->pid
;
1034 sma
->sem_otime
= CURRENT_TIME
;
1035 /* maybe some queued-up processes were waiting for this */
1040 current
->semundo
= NULL
;
1043 #ifdef CONFIG_PROC_FS
1044 static int sysvipc_sem_read_proc(char *buffer
, char **start
, off_t offset
, int length
, int *eof
, void *data
)
1050 len
+= sprintf(buffer
, " key semid perms nsems uid gid cuid cgid otime ctime\n");
1053 for(i
= 0; i
<= sem_ids
.max_id
; i
++) {
1054 struct sem_array
*sma
;
1057 len
+= sprintf(buffer
+ len
, "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
1059 sem_buildid(i
,sma
->sem_perm
.seq
),
1075 if(pos
> offset
+ length
)
1082 *start
= buffer
+ (offset
- begin
);
1083 len
-= (offset
- begin
);