/*
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
 * This code underwent a massive rewrite in order to solve some problems
 * with the original code. In particular the original code failed to
 * wake up processes that were waiting for semval to go to 0 if the
 * value went to 0 and was then incremented rapidly enough. In solving
 * this problem I have also modified the implementation so that it
 * processes pending operations in a FIFO manner, thus giving a guarantee
 * that processes waiting for a lock on the semaphore won't starve
 * unless another locking process fails to unlock.
 * In addition the following two changes in behavior have been introduced:
 * - The original implementation of semop returned the value of the
 *   last semaphore element examined on success. This does not
 *   match the manual page specifications, and effectively
 *   allows the user to read the semaphore even if they do not
 *   have read permissions. The implementation now returns 0
 *   on success as stated in the manual page.
 * - There is some confusion over whether the set of undo adjustments
 *   to be performed at exit should be done in an atomic manner.
 *   That is, if we are attempting to decrement the semval, should we queue
 *   up and wait until we can do so legally?
 *   The original implementation attempted to do this.
 *   The current implementation does not. This is because I don't
 *   think it is the right thing (TM) to do, and because I couldn't
 *   see a clean way to get the old behavior with the new design.
 *   The POSIX standard and SVID should be consulted to determine
 *   what behavior is mandated.
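 *
 * For illustration only (not part of the original source): a minimal
 * userspace sketch of the semantics described above, assuming a semaphore
 * set already obtained from semget(). semop() returns 0 on success, and
 * SEM_UNDO records an adjustment that exit_sem() applies when the process
 * exits:
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *	if (semop(semid, &op, 1) == 0) {
 *		... critical section; if the process exits here without
 *		... releasing the semaphore, the recorded +1 adjustment
 *		... is applied at exit (non-atomically, as discussed above)
 *	}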
 *
 * Further notes on refinement (Christoph Rohland, December 1998):
 * - The POSIX standard says that the undo adjustments should simply be
 *   redone. So the current implementation is OK.
 * - The previous code had two flaws:
 *   1) It actively gave the semaphore to the next waiting process
 *      sleeping on the semaphore. Since this process did not have the
 *      CPU, this led to many unnecessary context switches and bad
 *      performance. Now we only check which process should be able to
 *      get the semaphore, and if this process wants to reduce some
 *      semaphore value we simply wake it up without doing the
 *      operation. So it has to try to get it later. Thus e.g. the
 *      running process may reacquire the semaphore during the current
 *      time slice. If it only waits for zero or increases the semaphore,
 *      we do the operation in advance and wake it up.
 *   2) It did not wake up all processes waiting for zero. We try to do
 *      better, but only get the semops right which only wait for zero or
 *      increase. If there are decrement operations in the operations
 *      array we do the same as before.
 *
 * With the introduction of the O(1) scheduler, it became unnecessary to
 * perform the check/retry algorithm for waking up blocked processes, as
 * the new scheduler handles thread switches better than the old one.
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc <alan@redhat.com>
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/smp_lock.h>
#include <linux/security.h>
#include <asm/uaccess.h>
#define sem_lock(id)	((struct sem_array*)ipc_lock(&sem_ids,id))
#define sem_unlock(sma)	ipc_unlock(&(sma)->sem_perm)
#define sem_rmid(id)	((struct sem_array*)ipc_rmid(&sem_ids,id))
#define sem_checkid(sma, semid)	\
	ipc_checkid(&sem_ids,&sma->sem_perm,semid)
#define sem_buildid(id, seq) \
	ipc_buildid(&sem_ids, id, seq)
static struct ipc_ids sem_ids;

static int newary (key_t, int, int);
static void freeary (struct sem_array *sma, int id);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset,
				 int length, int *eof, void *data);
#endif
#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
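/* Illustrative arithmetic (not in the original source): the GETALL/SETALL
 * fast path keeps SEMMSL_FAST entries of sizeof(ushort) == 2 bytes on the
 * stack, i.e. 256 * 2 = 512 bytes; the semop() fast path keeps SEMOPM_FAST
 * entries of struct sembuf, which accounts for the roughly 372 bytes noted
 * above.
 */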
/* linked list protection:
 *	sem_array.sem_pending{,last},
 *	sem_array.sem_undo: sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 */
int sem_ctls[4] = {SEMMSL, SEMMNS, SEMOPM, SEMMNI};
#define sc_semmsl	(sem_ctls[0])
#define sc_semmns	(sem_ctls[1])
#define sc_semopm	(sem_ctls[2])
#define sc_semmni	(sem_ctls[3])

static int used_sems;
void __init sem_init (void)
{
	ipc_init_ids(&sem_ids, sc_semmni);

#ifdef CONFIG_PROC_FS
	create_proc_read_entry("sysvipc/sem", 0, 0, sysvipc_sem_read_proc, NULL);
#endif
}
static int newary (key_t key, int nsems, int semflg)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;

	if (used_sems + nsems > sc_semmns)
		return -ENOSPC;

	size = sizeof (*sma) + nsems * sizeof (struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma)
		return -ENOMEM;
	memset (sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_free(sma, size);
		return retval;
	}

	id = ipc_addid(&sem_ids, &sma->sem_perm, sc_semmni);
	if (id == -1) {
		security_sem_free(sma);
		ipc_rcu_free(sma, size);
		return -ENOSPC;
	}
	used_sems += nsems;

	sma->sem_base = (struct sem *) &sma[1];
	/* sma->sem_pending = NULL; */
	sma->sem_pending_last = &sma->sem_pending;
	/* sma->undo = NULL; */
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma);

	return sem_buildid(id, sma->sem_perm.seq);
}
asmlinkage long sys_semget (key_t key, int nsems, int semflg)
{
	int id, err = -EINVAL;
	struct sem_array *sma;

	if (nsems < 0 || nsems > sc_semmsl)
		return -EINVAL;
	down(&sem_ids.sem);

	if (key == IPC_PRIVATE) {
		err = newary(key, nsems, semflg);
	} else if ((id = ipc_findkey(&sem_ids, key)) == -1) { /* key not used */
		if (!(semflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newary(key, nsems, semflg);
	} else if (semflg & IPC_CREAT && semflg & IPC_EXCL) {
		err = -EEXIST;
	} else {
		sma = sem_lock(id);
		if (nsems > sma->sem_nsems)
			err = -EINVAL;
		else if (ipcperms(&sma->sem_perm, semflg))
			err = -EACCES;
		else {
			int semid = sem_buildid(id, sma->sem_perm.seq);
			err = security_sem_associate(sma, semflg);
			if (!err)
				err = semid;
		}
		sem_unlock(sma);
	}

	up(&sem_ids.sem);
	return err;
}
/* doesn't acquire the sem_lock on error! */
static int sem_revalidate(int semid, struct sem_array* sma, int nsems, short flg)
{
	struct sem_array* smanew;

	smanew = sem_lock(semid);
	if (smanew == NULL)
		return -EIDRM;
	if (smanew != sma || sem_checkid(sma,semid) || sma->sem_nsems != nsems) {
		sem_unlock(smanew);
		return -EIDRM;
	}

	if (flg && ipcperms(&sma->sem_perm, flg)) {
		sem_unlock(smanew);
		return -EACCES;
	}
	return 0;
}
/* Manage the doubly linked list sma->sem_pending as a FIFO:
 * insert new queue elements at the tail sma->sem_pending_last.
 */
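/* A worked example of the invariant (not in the original source): with an
 * empty queue, sma->sem_pending == NULL and sma->sem_pending_last points
 * at sma->sem_pending itself, so appending q1 and then q2 gives
 *
 *	append(q1):	sem_pending -> q1,        sem_pending_last == &q1->next
 *	append(q2):	sem_pending -> q1 -> q2,  sem_pending_last == &q2->next
 *
 * Each q->prev holds the address of the pointer that points at q, which is
 * what lets remove_from_queue() below unlink in O(1) without a back pointer.
 */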
static inline void append_to_queue (struct sem_array * sma,
				    struct sem_queue * q)
{
	*(q->prev = sma->sem_pending_last) = q;
	*(sma->sem_pending_last = &q->next) = NULL;
}

static inline void prepend_to_queue (struct sem_array * sma,
				     struct sem_queue * q)
{
	q->next = sma->sem_pending;
	*(q->prev = &sma->sem_pending) = q;
	if (q->next)
		q->next->prev = &q->next;
	else /* sma->sem_pending_last == &sma->sem_pending */
		sma->sem_pending_last = &q->next;
}
static inline void remove_from_queue (struct sem_array * sma,
				      struct sem_queue * q)
{
	*(q->prev) = q->next;
	if (q->next)
		q->next->prev = q->prev;
	else /* sma->sem_pending_last == &q->next */
		sma->sem_pending_last = q->prev;
	q->prev = NULL; /* mark as removed */
}
/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if need to sleep, else return error code.
 */
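/* Illustrative example (not in the original source): for a single decrement
 * sops[] = { { .sem_num = 0, .sem_op = -1, .sem_flg = 0 } } against
 * semval == 0, the loop below branches to would_block and the routine
 * returns 1 (the caller must sleep), or -EAGAIN if IPC_NOWAIT was set;
 * against semval > 0 it commits the operation and returns 0.
 */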
static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 * Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	sma->sem_otime = get_seconds();
	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}
/* Go through the pending queue for the indicated semaphore
 * looking for tasks that can be completed.
 */
static void update_queue (struct sem_array * sma)
{
	int error;
	struct sem_queue * q;

	for (q = sma->sem_pending; q; q = q->next) {
		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error <= 0) {
			q->status = error;
			remove_from_queue(sma,q);
			wake_up_process(q->sleeper);
		}
	}
}
/* The following counts are associated to each semaphore:
 *	semncnt		number of tasks waiting on semval being nonzero
 *	semzcnt		number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * guarantee that semncnt+semzcnt>0 if the task is on the pending queue.
 */
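/* Illustrative example (not in the original source): a task blocked on the
 * two-op sequence { decrement sem 0, wait-for-zero on sem 1 } is counted in
 * semncnt for semaphore 0 and in semzcnt for semaphore 1, even though it
 * really waits on both conditions at once; all the counts promise is that
 * some semaphore it references has semncnt+semzcnt > 0.
 */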
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
	int semncnt;
	struct sem_queue * q;

	semncnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}
static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue * q;

	semzcnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}
/* Free a semaphore set. freeary() is called with sem_ids.sem down and
 * the spinlock for this semaphore set held. sem_ids.sem remains locked
 * on exit.
 */
static void freeary (struct sem_array *sma, int id)
{
	struct sem_undo *un;
	struct sem_queue *q;
	int size;

	/* Invalidate the existing undo structures for this semaphore set.
	 * (They will be freed without any further action in exit_sem()
	 * or during the next semop.)
	 */
	for (un = sma->undo; un; un = un->id_next)
		un->semid = -1;

	/* Wake up all pending processes and let them fail with EIDRM. */
	for (q = sma->sem_pending; q; q = q->next) {
		q->status = -EIDRM;
		q->prev = NULL;
		wake_up_process(q->sleeper); /* doesn't sleep */
	}

	/* Remove the semaphore set from the ID array */
	sma = sem_rmid(id);
	sem_unlock(sma);

	used_sems -= sma->sem_nsems;
	size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem);
	security_sem_free(sma);
	ipc_rcu_free(sma, size);
}
static unsigned long copy_semid_to_user(void *buf, struct semid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
static int semctl_nolock(int semid, int semnum, int cmd, int version, union semun arg)
{
	int err = -EINVAL;
	struct sem_array *sma;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo,0,sizeof(seminfo));
		seminfo.semmni = sc_semmni;
		seminfo.semmns = sc_semmns;
		seminfo.semmsl = sc_semmsl;
		seminfo.semopm = sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down(&sem_ids.sem);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids.in_use;
			seminfo.semaem = used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = sem_ids.max_id;
		up(&sem_ids.sem);
		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0: max_id;
	}
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id;

		if(semid >= sem_ids.size)
			return -EINVAL;

		memset(&tbuf,0,sizeof(tbuf));

		sma = sem_lock(semid);
		if(sma == NULL)
			return -EINVAL;

		err = -EACCES;
		if (ipcperms (&sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		id = sem_buildid(semid, sma->sem_perm.seq);

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime  = sma->sem_otime;
		tbuf.sem_ctime  = sma->sem_ctime;
		tbuf.sem_nsems  = sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
	return err;
out_unlock:
	sem_unlock(sma);
	return err;
}
static int semctl_main(int semid, int semnum, int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem* curr;
	int err;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort* sem_io = fast_sem_io;
	int nsems;

	sma = sem_lock(semid);
	if(sma==NULL)
		return -EINVAL;

	nsems = sma->sem_nsems;

	err = -EIDRM;
	if (sem_checkid(sma,semid))
		goto out_unlock;

	err = -EACCES;
	if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
		goto out_unlock;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch (cmd) {
	case GETALL:
	{
		ushort *array = arg.array;
		int i;

		if(nsems > SEMMSL_FAST) {
			sem_unlock(sma);
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL)
				return -ENOMEM;
			err = sem_revalidate(semid, sma, nsems, S_IRUGO);
			if(err)
				goto out_free;
		}

		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma);
		err = 0;
		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		sem_unlock(sma);

		if(nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL)
				return -ENOMEM;
		}

		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				err = -ERANGE;
				goto out_free;
			}
		}
		err = sem_revalidate(semid, sma, nsems, S_IWUGO);
		if (err)
			goto out_free;

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];
		for (un = sma->undo; un; un = un->id_next)
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	case IPC_STAT:
	{
		struct semid64_ds tbuf;
		memset(&tbuf,0,sizeof(tbuf));
		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime  = sma->sem_otime;
		tbuf.sem_ctime  = sma->sem_ctime;
		tbuf.sem_nsems  = sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return 0;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
	}
	err = -EINVAL;
	if(semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;

		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		for (un = sma->undo; un; un = un->id_next)
			un->semadj[semnum] = 0;
		curr->semval = val;
		curr->sempid = current->pid;
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	}
out_unlock:
	sem_unlock(sma);
out_free:
	if(sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}
static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct semid64_ds tbuf;

		if(copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.sem_perm.uid;
		out->gid	= tbuf.sem_perm.gid;
		out->mode	= tbuf.sem_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.sem_perm.uid;
		out->gid	= tbuf_old.sem_perm.gid;
		out->mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
static int semctl_down(int semid, int semnum, int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	int err;
	struct sem_setbuf setbuf;
	struct kern_ipc_perm *ipcp;

	if(cmd == IPC_SET) {
		if(copy_semid_from_user (&setbuf, arg.buf, version))
			return -EFAULT;
	}
	sma = sem_lock(semid);
	if(sma==NULL)
		return -EINVAL;

	if (sem_checkid(sma,semid)) {
		err = -EIDRM;
		goto out_unlock;
	}
	ipcp = &sma->sem_perm;

	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) {
		err = -EPERM;
		goto out_unlock;
	}

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch(cmd){
	case IPC_RMID:
		freeary(sma, semid);
		err = 0;
		break;
	case IPC_SET:
		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
				| (setbuf.mode & S_IRWXUGO);
		sma->sem_ctime = get_seconds();
		sem_unlock(sma);
		err = 0;
		break;
	default:
		sem_unlock(sma);
		err = -EINVAL;
		break;
	}
	return err;

out_unlock:
	sem_unlock(sma);
	return err;
}
asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
{
	int err = -EINVAL;
	int version;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case SEM_STAT:
		err = semctl_nolock(semid,semnum,cmd,version,arg);
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case IPC_STAT:
	case SETVAL:
	case SETALL:
		err = semctl_main(semid,semnum,cmd,version,arg);
		return err;
	case IPC_RMID:
	case IPC_SET:
		down(&sem_ids.sem);
		err = semctl_down(semid,semnum,cmd,version,arg);
		up(&sem_ids.sem);
		return err;
	default:
		return -EINVAL;
	}
}
static inline void lock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
		spin_lock(&undo_list->lock);
}
/* This code has an interaction with copy_semundo().
 * Consider: two tasks are sharing the undo_list. task1
 * acquires the undo_list lock in lock_semundo(). If task2 now
 * exits before task1 releases the lock (by calling
 * unlock_semundo()), then task1 will never call spin_unlock().
 * This leaves the sem_undo_list in a locked state. If task1 now creates task3
 * and once again shares the sem_undo_list, the sem_undo_list will still be
 * locked, and future SEM_UNDO operations will deadlock. This case is
 * dealt with in copy_semundo() by having it reinitialize the spin lock when
 * the refcnt goes from 1 to 2.
 */
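/* Concrete interleaving of the case described above (illustrative, not in
 * the original source):
 *
 *	task1: lock_semundo()	-> refcnt == 2, spin_lock taken
 *	task2: exits		-> refcnt drops to 1
 *	task1: unlock_semundo()	-> refcnt == 1, so spin_unlock() is skipped
 *	task1: clone(CLONE_SYSVSEM) creates task3 sharing the list
 *	task3: lock_semundo()	-> would spin forever on the stale lock
 *
 * copy_semundo() breaks this chain by re-running spin_lock_init() when the
 * refcnt goes from 1 to 2.
 */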
static inline void unlock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
		spin_unlock(&undo_list->lock);
}
/* If the task doesn't already have an undo_list, then allocate one
 * here.  We guarantee there is only one thread using this undo list,
 * and current is THE ONE.
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;
	int size;

	undo_list = current->sysvsem.undo_list;
	if (undo_list == NULL) {
		size = sizeof(struct sem_undo_list);
		undo_list = (struct sem_undo_list *) kmalloc(size, GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		memset(undo_list, 0, size);
		/* don't initialize undo_list->lock here.  It's done
		 * in copy_semundo() instead.
		 */
		atomic_set(&undo_list->refcnt, 1);
		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}
static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo **last, *un;

	last = &ulp->proc_list;
	un = *last;
	while(un != NULL) {
		if(un->semid==semid)
			break;
		if(un->semid==-1) {
			*last=un->proc_next;
			kfree(un);
		} else {
			last=&un->proc_next;
		}
		un=*last;
	}
	return un;
}
static struct sem_undo *find_undo(int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems;
	int error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	lock_semundo();
	un = lookup_undo(ulp, semid);
	unlock_semundo();
	if (likely(un!=NULL))
		goto out;

	/* no undo structure around - allocate one. */
	sma = sem_lock(semid);
	un = ERR_PTR(-EINVAL);
	if(sma==NULL)
		goto out;
	un = ERR_PTR(-EIDRM);
	if (sem_checkid(sma,semid)) {
		sem_unlock(sma);
		goto out;
	}
	nsems = sma->sem_nsems;
	sem_unlock(sma);

	new = (struct sem_undo *) kmalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);
	memset(new, 0, sizeof(struct sem_undo) + sizeof(short)*nsems);
	new->semadj = (short *) &new[1];
	new->semid = semid;

	lock_semundo();
	un = lookup_undo(ulp, semid);
	if (un != NULL) {
		unlock_semundo();
		kfree(new);
		goto out;
	}
	error = sem_revalidate(semid, sma, nsems, 0);
	if (error) {
		unlock_semundo();
		kfree(new);
		un = ERR_PTR(error);
		goto out;
	}
	new->proc_next = ulp->proc_list;
	ulp->proc_list = new;
	new->id_next = sma->undo;
	sma->undo = new;
	sem_unlock(sma);
	unlock_semundo();
	un = new;
out:
	return un;
}
asmlinkage long sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
asmlinkage long sys_semtimedop(int semid, struct sembuf *tsops,
			unsigned nsops, const struct timespec *timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf* sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, decrease = 0, alter = 0, max;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > sc_semopm)
		return -E2BIG;
	if(nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
		if(sops==NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
		    _timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos++;
		if (sop->sem_op < 0)
			decrease = 1;
		if (sop->sem_op > 0)
			alter = 1;
	}
	alter |= decrease;

retry_undos:
	if (undos) {
		un = find_undo(semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else
		un = NULL;

	sma = sem_lock(semid);
	error = -EINVAL;
	if(sma==NULL)
		goto out_free;
	error = -EIDRM;
	if (sem_checkid(sma,semid))
		goto out_unlock_free;
	/*
	 * semid identifiers are not unique - find_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array has received the same id. Check and retry.
	 */
	if (un && un->semid == -1) {
		sem_unlock(sma);
		goto retry_undos;
	}
	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_unlock_free;

	error = -EACCES;
	if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_unlock_free;

	error = try_atomic_semop (sma, sops, nsops, un, current->pid);
	if (error <= 0)
		goto update;

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */
	queue.sma = sma;
	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = current->pid;
	queue.id = semid;
	if (alter)
		append_to_queue(sma, &queue);
	else
		prepend_to_queue(sma, &queue);

	queue.status = -EINTR;
	queue.sleeper = current;
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma);

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	sma = sem_lock(semid);
	if(sma==NULL) {
		if(queue.prev != NULL)
			BUG();
		error = queue.status;
		goto out_free;
	}

	/*
	 * If queue.status != -EINTR we are woken up by another process
	 */
	error = queue.status;
	if (queue.status != -EINTR) {
		goto out_unlock_free;
	}

	/*
	 * If an interrupt occurred we have to clean up the queue
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;
	remove_from_queue(sma, &queue);
	goto out_unlock_free;

update:
	if (alter)
		update_queue(sma);
out_unlock_free:
	sem_unlock(sma);
out_free:
	if(sops != fast_sops)
		kfree(sops);
	return error;
}
/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 *
 * See the notes above unlock_semundo() regarding the spin_lock_init()
 * in this code.  Initialize the undo_list->lock here instead of in
 * get_undo_list() because of the reasoning in the comment above
 * unlock_semundo().
 */
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		if (atomic_read(&undo_list->refcnt) == 1)
			spin_lock_init(&undo_list->lock);
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}
/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval,
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	struct sem_undo *u, **up;

	undo_list = tsk->sysvsem.undo_list;
	if (!undo_list)
		return;

	if (!atomic_dec_and_test(&undo_list->refcnt))
		return;

	/* There's no need to hold the semundo list lock, as current
	 * is the last task exiting for this undo list.
	 */
	for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) {
		struct sem_array *sma;
		int nsems, i;
		struct sem_undo *un, **unp;
		int semid;

		semid = u->semid;
		if (semid == -1)
			continue;

		sma = sem_lock(semid);
		if (sma == NULL)
			continue;

		if (u->semid == -1)
			goto next_entry;

		BUG_ON(sem_checkid(sma,u->semid));

		/* remove u from the sma->undo list */
		for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
			if (u == un)
				goto found;
		}
		printk ("exit_sem undo list error id=%d\n", u->semid);
		goto next_entry;
found:
		*unp = un->id_next;
		/* perform adjustments registered in u */
		nsems = sma->sem_nsems;
		for (i = 0; i < nsems; i++) {
			struct sem * sem = &sma->sem_base[i];
			if (u->semadj[i]) {
				sem->semval += u->semadj[i];
				if (sem->semval < 0)
					sem->semval = 0; /* shouldn't happen */
				sem->sempid = current->pid;
			}
		}
		sma->sem_otime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
next_entry:
		sem_unlock(sma);
	}
	kfree(undo_list);
}
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t begin = 0;
	int i, len = 0;

	len += sprintf(buffer, "       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n");
	down(&sem_ids.sem);

	for(i = 0; i <= sem_ids.max_id; i++) {
		struct sem_array *sma;
		sma = sem_lock(i);
		if(sma) {
			len += sprintf(buffer + len, "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
				sma->sem_perm.key,
				sem_buildid(i,sma->sem_perm.seq),
				sma->sem_perm.mode,
				sma->sem_nsems,
				sma->sem_perm.uid,
				sma->sem_perm.gid,
				sma->sem_perm.cuid,
				sma->sem_perm.cgid,
				sma->sem_otime,
				sma->sem_ctime);
			sem_unlock(sma);

			pos += len;
			if(pos < offset) {
				len = 0;
				begin = pos;
			}
			if(pos > offset + length)
				goto done;
		}
	}
	*eof = 1;
done:
	up(&sem_ids.sem);
	*start = buffer + (offset - begin);
	len -= (offset - begin);
	if(len > length)
		len = length;
	if(len < 0)
		len = 0;
	return len;
}
#endif