/*
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
 * This code underwent a massive rewrite in order to solve some problems
 * with the original code. In particular the original code failed to
 * wake up processes that were waiting for semval to go to 0 if the
 * value went to 0 and was then incremented rapidly enough. In solving
 * this problem I have also modified the implementation so that it
 * processes pending operations in a FIFO manner, thus giving a guarantee
 * that processes waiting for a lock on the semaphore won't starve
 * unless another locking process fails to unlock.
 * In addition the following two changes in behavior have been introduced:
 * - The original implementation of semop returned the value of the
 *   last semaphore element examined on success. This does not
 *   match the manual page specifications, and effectively
 *   allows the user to read the semaphore even if they do not
 *   have read permissions. The implementation now returns 0
 *   on success as stated in the manual page.
 * - There is some confusion over whether the set of undo adjustments
 *   to be performed at exit should be done in an atomic manner.
 *   That is, if we are attempting to decrement the semval, should we queue
 *   up and wait until we can do so legally?
 *   The original implementation attempted to do this.
 *   The current implementation does not do so. This is because I don't
 *   think it is the right thing (TM) to do, and because I couldn't
 *   see a clean way to get the old behavior with the new design.
 *   The POSIX standard and SVID should be consulted to determine
 *   what behavior is mandated.
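 *
 * For illustration, the first point as seen from user space; this is a
 * sketch only, not part of this file (semid is assumed to name an
 * existing set, error handling omitted):
 *
 *	struct sembuf op;
 *	op.sem_num = 0;
 *	op.sem_op  = -1;		decrement semval by one
 *	op.sem_flg = SEM_UNDO;		record an undo adjustment
 *	if (semop(semid, &op, 1) == 0)
 *		... success: semop() returned 0, not a semaphore value ...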
 *
 * Further notes on refinement (Christoph Rohland, December 1998):
 * - The POSIX standard says that the undo adjustments should simply be
 *   applied. So the current implementation is OK.
 * - The previous code had two flaws:
 *   1) It actively gave the semaphore to the next waiting process
 *      sleeping on the semaphore. Since this process did not have the
 *      cpu, this led to many unnecessary context switches and bad
 *      performance. Now we only check which process should be able to
 *      get the semaphore, and if this process wants to reduce some
 *      semaphore value we simply wake it up without doing the
 *      operation. So it has to try to get it later. Thus e.g. the
 *      running process may reacquire the semaphore during the current
 *      time slice. If it only waits for zero or increases the semaphore,
 *      we do the operation in advance and wake it up.
 *   2) It did not wake up all zero-waiting processes. We try to do
 *      better, but only get the semops right which only wait for zero or
 *      increase. If there are decrement operations in the operations
 *      array we do the same as before.
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc <alan@redhat.com>
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/smp_lock.h>
#include <linux/security.h>
#include <asm/uaccess.h>
#define sem_lock(id)	((struct sem_array*)ipc_lock(&sem_ids,id))
#define sem_unlock(sma)	ipc_unlock(&(sma)->sem_perm)
#define sem_rmid(id)	((struct sem_array*)ipc_rmid(&sem_ids,id))
#define sem_checkid(sma, semid)	\
	ipc_checkid(&sem_ids,&sma->sem_perm,semid)
#define sem_buildid(id, seq) \
	ipc_buildid(&sem_ids, id, seq)
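
/*
 * The semid handed back to user space encodes both the slot in sem_ids
 * and a sequence counter, so that a stale id left over from a deleted
 * set is rejected by sem_checkid().  A rough sketch of the arithmetic,
 * assuming the generic ipc_buildid() of this tree
 * (SEQ_MULTIPLIER*seq + id):
 *
 *	semid = seq * SEQ_MULTIPLIER + slot;		sem_buildid()
 *	slot  = semid % SEQ_MULTIPLIER;			ipc_lock()
 *	stale = (semid / SEQ_MULTIPLIER) != perm->seq;	sem_checkid()
 */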
static struct ipc_ids sem_ids;

static int newary (key_t, int, int);
static void freeary (int id);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset,
				int length, int *eof, void *data);
#endif
#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
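
/*
 * These constants bound the common case in which the buffers used by
 * semop() and semctl() fit on the kernel stack, avoiding ipc_alloc().
 * The idiom, sketched with the variable names used in semctl_main()
 * below:
 *
 *	ushort fast_sem_io[SEMMSL_FAST];
 *	ushort *sem_io = fast_sem_io;
 *	if (nsems > SEMMSL_FAST)
 *		sem_io = ipc_alloc(sizeof(ushort)*nsems);
 *	...
 *	if (sem_io != fast_sem_io)
 *		ipc_free(sem_io, sizeof(ushort)*nsems);
 */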
/*
 * linked list protection:
 *	sem_array.sem_pending{,last},
 *	sem_array.sem_undo: sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 */
int sem_ctls[4] = {SEMMSL, SEMMNS, SEMOPM, SEMMNI};
#define sc_semmsl	(sem_ctls[0])
#define sc_semmns	(sem_ctls[1])
#define sc_semopm	(sem_ctls[2])
#define sc_semmni	(sem_ctls[3])
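
/*
 * These four limits are also visible to user space through
 * semctl(IPC_INFO); see semctl_nolock() below.  A user-space sketch
 * (error handling omitted; declaring union semun is the caller's
 * responsibility on Linux):
 *
 *	union semun arg;
 *	struct seminfo info;
 *	arg.__buf = &info;
 *	semctl(0, 0, IPC_INFO, arg);
 *	now info.semmsl, info.semmns, info.semopm and info.semmni
 *	hold the current values of sem_ctls[0..3].
 */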
static int used_sems;
void __init sem_init (void)
{
	ipc_init_ids(&sem_ids, sc_semmni);

#ifdef CONFIG_PROC_FS
	create_proc_read_entry("sysvipc/sem", 0, 0, sysvipc_sem_read_proc, NULL);
#endif
}
static int newary (key_t key, int nsems, int semflg)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;

	if (used_sems + nsems > sc_semmns)
		return -ENOSPC;

	size = sizeof (*sma) + nsems * sizeof (struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma)
		return -ENOMEM;
	memset (sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_ops->sem_alloc_security(sma);
	if (retval) {
		ipc_rcu_free(sma, size);
		return retval;
	}

	id = ipc_addid(&sem_ids, &sma->sem_perm, sc_semmni);
	if (id == -1) {
		security_ops->sem_free_security(sma);
		ipc_rcu_free(sma, size);
		return -ENOSPC;
	}
	used_sems += nsems;

	sma->sem_base = (struct sem *) &sma[1];
	/* sma->sem_pending = NULL; */
	sma->sem_pending_last = &sma->sem_pending;
	/* sma->undo = NULL; */
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma);

	return sem_buildid(id, sma->sem_perm.seq);
}
asmlinkage long sys_semget (key_t key, int nsems, int semflg)
{
	int id, err = -EINVAL;
	struct sem_array *sma;

	if (nsems < 0 || nsems > sc_semmsl)
		return -EINVAL;
	down(&sem_ids.sem);

	if (key == IPC_PRIVATE) {
		err = newary(key, nsems, semflg);
	} else if ((id = ipc_findkey(&sem_ids, key)) == -1) {	/* key not used */
		if (!(semflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newary(key, nsems, semflg);
	} else if (semflg & IPC_CREAT && semflg & IPC_EXCL) {
		err = -EEXIST;
	} else {
		sma = sem_lock(id);
		if (nsems > sma->sem_nsems)
			err = -EINVAL;
		else if (ipcperms(&sma->sem_perm, semflg))
			err = -EACCES;
		else
			err = sem_buildid(id, sma->sem_perm.seq);
		sem_unlock(sma);
	}

	up(&sem_ids.sem);
	return err;
}
/* doesn't acquire the sem_lock on error! */
static int sem_revalidate(int semid, struct sem_array* sma, int nsems, short flg)
{
	struct sem_array* smanew;

	smanew = sem_lock(semid);
	if (smanew == NULL)
		return -EIDRM;
	if (smanew != sma || sem_checkid(sma,semid) || sma->sem_nsems != nsems) {
		sem_unlock(smanew);
		return -EIDRM;
	}

	if (ipcperms(&sma->sem_perm, flg)) {
		sem_unlock(smanew);
		return -EACCES;
	}
	return 0;
}
/* Manage the doubly linked list sma->sem_pending as a FIFO:
 * insert new queue elements at the tail sma->sem_pending_last.
 */
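/*
 * Note on the idiom: q->prev does not point at the previous element but
 * at the previous "next" pointer (or at sma->sem_pending itself), so
 * removal needs no special case for the list head.  The same technique
 * in a generic, illustrative form (made-up names):
 *
 *	struct node { struct node *next, **prev; };
 *
 *	void unlink(struct node *n)
 *	{
 *		*(n->prev) = n->next;
 *		if (n->next)
 *			n->next->prev = n->prev;
 *	}
 */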
static inline void append_to_queue (struct sem_array * sma,
				    struct sem_queue * q)
{
	*(q->prev = sma->sem_pending_last) = q;
	*(sma->sem_pending_last = &q->next) = NULL;
}

static inline void prepend_to_queue (struct sem_array * sma,
				     struct sem_queue * q)
{
	q->next = sma->sem_pending;
	*(q->prev = &sma->sem_pending) = q;
	if (q->next)
		q->next->prev = &q->next;
	else /* sma->sem_pending_last == &sma->sem_pending */
		sma->sem_pending_last = &q->next;
}
static inline void remove_from_queue (struct sem_array * sma,
				      struct sem_queue * q)
{
	*(q->prev) = q->next;
	if (q->next)
		q->next->prev = q->prev;
	else /* sma->sem_pending_last == &q->next */
		sma->sem_pending_last = q->prev;
	q->prev = NULL;	/* mark as removed */
}
/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if the task must sleep, else return
 * an error code.
 */
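/*
 * The tri-state return value gives callers the shape below, as seen in
 * sys_semop() and update_queue() (a sketch only):
 *
 *	error = try_atomic_semop(sma, sops, nsops, un, pid, do_undo);
 *	if (error < 0)
 *		fail with -errno;
 *	else if (error == 0)
 *		all operations were applied atomically;
 *	else
 *		would block: queue the task and sleep;
 */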
static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
			     int nsops, struct sem_undo *un, int pid,
			     int do_undo)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;

		if (!sem_op && curr->semval)
			goto would_block;

		curr->sempid = (curr->sempid << 16) | pid;
		curr->semval += sem_op;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 * Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM) {
				/* Don't undo the undo */
				sop->sem_flg &= ~SEM_UNDO;
				goto out_of_range;
			}
			un->semadj[sop->sem_num] = undo;
		}
		if (curr->semval < 0)
			goto would_block;
		if (curr->semval > SEMVMX)
			goto out_of_range;
	}

	if (do_undo) {
		sop--;
		result = 0;
		goto undo;
	}

	sma->sem_otime = get_seconds();
	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	while (sop >= sops) {
		curr = sma->sem_base + sop->sem_num;
		curr->semval -= sop->sem_op;
		curr->sempid >>= 16;

		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sop->sem_op;
		sop--;
	}

	return result;
}
/* Go through the pending queue for the indicated semaphore
 * looking for tasks that can be completed.
 */
static void update_queue (struct sem_array * sma)
{
	int error;
	struct sem_queue * q;

	for (q = sma->sem_pending; q; q = q->next) {
		if (q->status == 1)
			continue;	/* this one was woken up before */

		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid, q->alter);

		/* Does q->sleeper still need to sleep? */
		if (error <= 0) {
			/* Found one, wake it up */
			wake_up_process(q->sleeper);
			if (error == 0 && q->alter) {
				/* if q->alter, let it try itself */
				q->status = 1;
				return;
			}
			q->status = error;
			remove_from_queue(sma,q);
		}
	}
}
/* The following counts are associated to each semaphore:
 *	semncnt		number of tasks waiting on semval being nonzero
 *	semzcnt		number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * guarantee that semncnt+semzcnt>0 if the task is on the pending queue.
 */
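/*
 * These counts back the GETNCNT and GETZCNT semctl() commands.  From
 * user space (a sketch; semid assumed valid, error handling omitted):
 *
 *	int ncnt = semctl(semid, 0, GETNCNT);	tasks waiting for the
 *						value to increase
 *	int zcnt = semctl(semid, 0, GETZCNT);	tasks waiting for the
 *						value to reach zero
 */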
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
	int semncnt;
	struct sem_queue * q;

	semncnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}
static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue * q;

	semzcnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}
/* Free a semaphore set. */
static void freeary (int id)
{
	struct sem_array *sma;
	struct sem_undo *un;
	struct sem_queue *q;
	int size;

	sma = sem_rmid(id);

	/* Invalidate the existing undo structures for this semaphore set.
	 * (They will be freed without any further action in sem_exit()
	 * or during the next semop.)
	 */
	for (un = sma->undo; un; un = un->id_next)
		un->semid = -1;

	/* Wake up all pending processes and let them fail with EIDRM. */
	for (q = sma->sem_pending; q; q = q->next) {
		q->status = -EIDRM;
		q->prev = NULL;
		wake_up_process(q->sleeper); /* doesn't sleep */
	}
	sem_unlock(sma);

	used_sems -= sma->sem_nsems;
	size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem);
	security_ops->sem_free_security(sma);
	ipc_rcu_free(sma, size);
}
static unsigned long copy_semid_to_user(void *buf, struct semid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
static int semctl_nolock(int semid, int semnum, int cmd, int version, union semun arg)
{
	int err = -EINVAL;
	struct sem_array *sma;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		memset(&seminfo,0,sizeof(seminfo));
		seminfo.semmni = sc_semmni;
		seminfo.semmns = sc_semmns;
		seminfo.semmsl = sc_semmsl;
		seminfo.semopm = sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down(&sem_ids.sem);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids.in_use;
			seminfo.semaem = used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = sem_ids.max_id;
		up(&sem_ids.sem);
		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0: max_id;
	}
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id;

		if(semid >= sem_ids.size)
			return -EINVAL;

		memset(&tbuf,0,sizeof(tbuf));

		sma = sem_lock(semid);
		if(sma == NULL)
			return -EINVAL;

		err = -EACCES;
		if (ipcperms (&sma->sem_perm, S_IRUGO))
			goto out_unlock;
		id = sem_buildid(semid, sma->sem_perm.seq);

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime  = sma->sem_otime;
		tbuf.sem_ctime  = sma->sem_ctime;
		tbuf.sem_nsems  = sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
	return err;
out_unlock:
	sem_unlock(sma);
	return err;
}
static int semctl_main(int semid, int semnum, int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem *curr;
	int err;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	int nsems;

	sma = sem_lock(semid);
	if(sma==NULL)
		return -EINVAL;

	nsems = sma->sem_nsems;

	err=-EIDRM;
	if (sem_checkid(sma,semid))
		goto out_unlock;

	err = -EACCES;
	if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
		goto out_unlock;

	switch (cmd) {
	case GETALL:
	{
		ushort *array = arg.array;
		int i;

		if(nsems > SEMMSL_FAST) {
			sem_unlock(sma);
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL)
				return -ENOMEM;
			err = sem_revalidate(semid, sma, nsems, S_IRUGO);
			if(err)
				goto out_free;
		}

		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma);
		err = 0;
		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		sem_unlock(sma);

		if(nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL)
				return -ENOMEM;
		}

		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				err = -ERANGE;
				goto out_free;
			}
		}
		err = sem_revalidate(semid, sma, nsems, S_IWUGO);
		if(err)
			goto out_free;

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];
		for (un = sma->undo; un; un = un->id_next)
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	case IPC_STAT:
	{
		struct semid64_ds tbuf;
		memset(&tbuf,0,sizeof(tbuf));
		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime  = sma->sem_otime;
		tbuf.sem_ctime  = sma->sem_ctime;
		tbuf.sem_nsems  = sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return 0;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
	}

	err = -EINVAL;
	if(semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid & 0xffff;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;

		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		for (un = sma->undo; un; un = un->id_next)
			un->semadj[semnum] = 0;
		curr->semval = val;
		curr->sempid = current->pid;
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	}
out_unlock:
	sem_unlock(sma);
out_free:
	if(sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}
static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct semid64_ds tbuf;

		if(copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.sem_perm.uid;
		out->gid	= tbuf.sem_perm.gid;
		out->mode	= tbuf.sem_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.sem_perm.uid;
		out->gid	= tbuf_old.sem_perm.gid;
		out->mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
static int semctl_down(int semid, int semnum, int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	int err;
	struct sem_setbuf setbuf;
	struct kern_ipc_perm *ipcp;

	if(cmd == IPC_SET) {
		if(copy_semid_from_user (&setbuf, arg.buf, version))
			return -EFAULT;
	}
	sma = sem_lock(semid);
	if(sma==NULL)
		return -EINVAL;

	if (sem_checkid(sma,semid)) {
		err=-EIDRM;
		goto out_unlock;
	}
	ipcp = &sma->sem_perm;

	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) {
		err=-EPERM;
		goto out_unlock;
	}

	switch(cmd){
	case IPC_RMID:
		freeary(semid);
		err = 0;
		break;
	case IPC_SET:
		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
				| (setbuf.mode & S_IRWXUGO);
		sma->sem_ctime = get_seconds();
		sem_unlock(sma);
		err = 0;
		break;
	default:
		sem_unlock(sma);
		err = -EINVAL;
		break;
	}
	return err;

out_unlock:
	sem_unlock(sma);
	return err;
}
asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
{
	int err = -EINVAL;
	int version;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case SEM_STAT:
		err = semctl_nolock(semid,semnum,cmd,version,arg);
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case IPC_STAT:
	case SETVAL:
	case SETALL:
		err = semctl_main(semid,semnum,cmd,version,arg);
		return err;
	case IPC_RMID:
	case IPC_SET:
		down(&sem_ids.sem);
		err = semctl_down(semid,semnum,cmd,version,arg);
		up(&sem_ids.sem);
		return err;
	default:
		return -EINVAL;
	}
}
static inline void lock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
		spin_lock(&undo_list->lock);
}
/* This code has an interaction with copy_semundo().
 * Consider: two tasks are sharing the undo_list. task1
 * acquires the undo_list lock in lock_semundo(). If task2 now
 * exits before task1 releases the lock (by calling
 * unlock_semundo()), then task1 will never call spin_unlock().
 * This leaves the sem_undo_list in a locked state. If task1 now creates
 * task3 and once again shares the sem_undo_list, the sem_undo_list will
 * still be locked, and future SEM_UNDO operations will deadlock. This
 * case is dealt with in copy_semundo() by having it reinitialize the
 * spin lock when the refcnt goes from 1 to 2.
 */
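/*
 * An illustrative timeline of the hazard described above:
 *
 *	task1				task2
 *	-----				-----
 *	lock_semundo()
 *	  refcnt == 2 -> spin_lock()
 *					exits; refcnt drops to 1
 *	unlock_semundo()
 *	  refcnt == 1 -> no spin_unlock(), lock stays held
 *	clones task3 with CLONE_SYSVSEM
 *	  copy_semundo() sees refcnt go 1 -> 2 and re-runs
 *	  spin_lock_init(), clearing the stale locked state
 */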
static inline void unlock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
		spin_unlock(&undo_list->lock);
}
/* If the task doesn't already have an undo_list, then allocate one
 * here.  We guarantee there is only one thread using this undo list,
 * and current is THE ONE.
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;
	int size;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		size = sizeof(struct sem_undo_list);
		undo_list = (struct sem_undo_list *) kmalloc(size, GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		memset(undo_list, 0, size);
		/* don't initialize undo_list->lock here.  It's done
		 * in copy_semundo() instead.
		 */
		atomic_set(&undo_list->refcnt, 1);
		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}
static struct sem_undo* freeundos(struct sem_undo* un)
{
	struct sem_undo *u;
	struct sem_undo **up;

	for(up = &current->sysvsem.undo_list->proc_list;(u=*up);up=&u->proc_next) {
		if(un==u) {
			un=u->proc_next;
			*up=un;
			kfree(u);
			return un;
		}
	}
	printk ("freeundos undo list error id=%d\n", un->semid);
	return un->proc_next;
}
static inline struct sem_undo *find_undo(int semid)
{
	struct sem_undo *un;

	un = NULL;
	if (current->sysvsem.undo_list != NULL) {
		un = current->sysvsem.undo_list->proc_list;
	}
	while(un != NULL) {
		if(un->semid==semid)
			break;
		if(un->semid==-1)
			un=freeundos(un);
		else
			un=un->proc_next;
	}
	return un;
}
/* returns without sem_lock and semundo list locks on error! */
static int alloc_undo(struct sem_array *sma, struct sem_undo** unp, int semid, int alter)
{
	int size, nsems, error;
	struct sem_undo *un, *new_un;
	struct sem_undo_list *undo_list;
	unsigned long saved_add_count;

	nsems = sma->sem_nsems;
	saved_add_count = 0;
	if (current->sysvsem.undo_list != NULL)
		saved_add_count = current->sysvsem.undo_list->add_count;
	sem_unlock(sma);
	unlock_semundo();

	error = get_undo_list(&undo_list);
	if (error)
		return error;

	size = sizeof(struct sem_undo) + sizeof(short)*nsems;
	un = (struct sem_undo *) kmalloc(size, GFP_KERNEL);
	if (!un)
		return -ENOMEM;

	memset(un, 0, size);
	lock_semundo();
	error = sem_revalidate(semid, sma, nsems, alter ? S_IWUGO : S_IRUGO);
	if(error) {
		unlock_semundo();
		kfree(un);
		return error;
	}

	/* alloc_undo has just
	 * released all locks and reacquired them.
	 * But, another thread may have
	 * added the semundo we were looking for
	 * during that time.
	 * So, we check for it again.
	 * We only initialize and add the new one
	 * if we don't discover one already there.
	 */
	new_un = NULL;
	if (current->sysvsem.undo_list->add_count != saved_add_count)
		new_un = find_undo(semid);

	if (new_un != NULL) {
		kfree(un);
		un = new_un;
	} else {
		current->sysvsem.undo_list->add_count++;
		un->semadj = (short *) &un[1];
		un->semid = semid;
		un->proc_next = undo_list->proc_list;
		undo_list->proc_list = un;
		un->id_next = sma->undo;
		sma->undo = un;
	}
	*unp = un;
	return 0;
}
asmlinkage long sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, decrease = 0, alter = 0;
	struct sem_queue queue;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > sc_semopm)
		return -E2BIG;
	if(nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
		if(sops==NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}
	lock_semundo();
	sma = sem_lock(semid);
	error = -EINVAL;
	if(sma==NULL)
		goto out_semundo_free;
	error = -EIDRM;
	if (sem_checkid(sma,semid))
		goto out_unlock_semundo_free;
	error = -EFBIG;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= sma->sem_nsems)
			goto out_unlock_semundo_free;
		if (sop->sem_flg & SEM_UNDO)
			undos++;
		if (sop->sem_op < 0)
			decrease = 1;
		if (sop->sem_op > 0)
			alter = 1;
	}
	alter |= decrease;

	error = -EACCES;
	if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_semundo_free;
	if (undos) {
		/* Make sure we have an undo structure
		 * for this process and this semaphore set.
		 */
		un = find_undo(semid);
		if (!un) {
			error = alloc_undo(sma,&un,semid,alter);
			if (error)
				goto out_free;
		}
	} else
		un = NULL;

	error = try_atomic_semop (sma, sops, nsops, un, current->pid, 0);
	if (error <= 0)
		goto update;

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */
	queue.sma = sma;
	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = current->pid;
	queue.alter = decrease;
	queue.id = semid;
	if (alter)
		append_to_queue(sma,&queue);
	else
		prepend_to_queue(sma,&queue);
	current->sysvsem.sleep_list = &queue;

	for (;;) {
		queue.status = -EINTR;
		queue.sleeper = current;
		current->state = TASK_INTERRUPTIBLE;
		sem_unlock(sma);
		unlock_semundo();

		schedule();

		lock_semundo();
		sma = sem_lock(semid);
		if(sma==NULL) {
			if(queue.prev != NULL)
				BUG();
			current->sysvsem.sleep_list = NULL;
			error = -EIDRM;
			goto out_semundo_free;
		}
		/*
		 * If queue.status == 1 we were woken up and
		 * have to retry, else we simply return.
		 * If an interrupt occurred we have to clean up the
		 * queue.
		 */
		if (queue.status == 1) {
			error = try_atomic_semop (sma, sops, nsops, un,
						  current->pid, 0);
			if (error <= 0)
				break;
		} else {
			error = queue.status;
			if (queue.prev) /* got Interrupt */
				break;
			/* Everything done by update_queue */
			current->sysvsem.sleep_list = NULL;
			goto out_unlock_semundo_free;
		}
	}
	current->sysvsem.sleep_list = NULL;
	remove_from_queue(sma,&queue);
update:
	if (alter)
		update_queue (sma);
out_unlock_semundo_free:
	sem_unlock(sma);
out_semundo_free:
	unlock_semundo();
out_free:
	if(sops != fast_sops)
		kfree(sops);
	return error;
}
/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 *
 * See the notes above unlock_semundo() regarding the spin_lock_init()
 * in this code.  Initialize the undo_list->lock here instead of in
 * get_undo_list() because of the reasoning in the comment above
 * unlock_semundo().
 */
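/*
 * The user-space side of this, as a rough sketch (child_fn and
 * stack_top are made-up names; flags abridged):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	clone(child_fn, stack_top, CLONE_SYSVSEM | SIGCHLD, NULL);
 *
 * Parent and child then share one SEM_UNDO list, and the accumulated
 * semadj values are applied once, when the last sharer exits.
 */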
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		if (atomic_read(&undo_list->refcnt) == 1)
			spin_lock_init(&undo_list->lock);
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}
static inline void __exit_semundo(struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;

	undo_list = tsk->sysvsem.undo_list;
	if (!atomic_dec_and_test(&undo_list->refcnt))
		return;
	kfree(undo_list);
}

void exit_semundo(struct task_struct *tsk)
{
	if (tsk->sysvsem.undo_list != NULL)
		__exit_semundo(tsk);
}
/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed,
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval,
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
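/*
 * Concretely, each SEM_UNDO operation accumulates -sem_op into semadj
 * (see try_atomic_semop() above), and that total is added back here.
 * Illustrative numbers only:
 *
 *	semop  sem_op = -2, SEM_UNDO:	semval -= 2, semadj += 2
 *	semop  sem_op = +1, SEM_UNDO:	semval += 1, semadj -= 1
 *	exit:				semval += semadj   (net +1 here)
 *
 * so the set ends up as if the exiting task had never run its undoable
 * operations.
 */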
void sem_exit (void)
{
	struct sem_queue *q;
	struct sem_undo *u, *un = NULL, **up, **unp;
	struct sem_array *sma;
	struct sem_undo_list *undo_list;
	int nsems, i;

	/* If the current process was sleeping for a semaphore,
	 * remove it from the queue.
	 */
	if ((q = current->sysvsem.sleep_list)) {
		int semid = q->id;
		sma = sem_lock(semid);
		current->sysvsem.sleep_list = NULL;

		if (q->prev) {
			if(sma==NULL)
				BUG();
			remove_from_queue(q->sma,q);
		}
		if(sma!=NULL)
			sem_unlock(sma);
	}

	undo_list = current->sysvsem.undo_list;
	if ((undo_list == NULL) || (atomic_read(&undo_list->refcnt) != 1)) {
		if (undo_list != NULL)
			__exit_semundo(current);
		return;
	}

	/* There's no need to hold the semundo list lock, as current
	 * is the last task exiting for this undo list.
	 */
	for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) {
		int semid = u->semid;

		if(semid == -1)
			continue;
		sma = sem_lock(semid);
		if (sma == NULL)
			continue;

		if (u->semid == -1)
			goto next_entry;

		if (sem_checkid(sma,u->semid))
			goto next_entry;

		/* remove u from the sma->undo list */
		for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
			if (u == un)
				goto found;
		}
		printk ("sem_exit undo list error id=%d\n", u->semid);
		goto next_entry;
found:
		*unp = un->id_next;
		/* perform adjustments registered in u */
		nsems = sma->sem_nsems;
		for (i = 0; i < nsems; i++) {
			struct sem * sem = &sma->sem_base[i];
			sem->semval += u->semadj[i];
			if (sem->semval < 0)
				sem->semval = 0; /* shouldn't happen */
			sem->sempid = current->pid;
		}
		sma->sem_otime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
next_entry:
		sem_unlock(sma);
	}
	__exit_semundo(current);
	current->sysvsem.undo_list = NULL;
}
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t begin = 0;
	int i, len = 0;

	len += sprintf(buffer, "       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n");
	down(&sem_ids.sem);

	for(i = 0; i <= sem_ids.max_id; i++) {
		struct sem_array *sma;
		sma = sem_lock(i);
		if(sma) {
			len += sprintf(buffer + len, "%10d %10d  %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
				sma->sem_perm.key,
				sem_buildid(i,sma->sem_perm.seq),
				sma->sem_perm.mode,
				sma->sem_nsems,
				sma->sem_perm.uid,
				sma->sem_perm.gid,
				sma->sem_perm.cuid,
				sma->sem_perm.cgid,
				sma->sem_otime,
				sma->sem_ctime);
			sem_unlock(sma);

			pos += len;
			if(pos < offset) {
				len = 0;
				begin = pos;
			}
			if(pos > offset + length)
				goto done;
		}
	}
	*eof = 1;
done:
	up(&sem_ids.sem);
	*start = buffer + (offset - begin);
	len -= (offset - begin);
	if(len > length)
		len = length;
	if(len < 0)
		len = 0;
	return len;
}
#endif