/*
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
 * This code underwent a massive rewrite in order to solve some problems
 * with the original code. In particular the original code failed to
 * wake up processes that were waiting for semval to go to 0 if the
 * value went to 0 and was then incremented rapidly enough. In solving
 * this problem I have also modified the implementation so that it
 * processes pending operations in a FIFO manner, thus giving a guarantee
 * that processes waiting for a lock on the semaphore won't starve
 * unless another locking process fails to unlock.
 * In addition the following two changes in behavior have been introduced:
 * - The original implementation of semop returned the value of the
 *   last semaphore element examined on success. This does not
 *   match the manual page specifications, and effectively
 *   allows the user to read the semaphore even if they do not
 *   have read permissions. The implementation now returns 0
 *   on success as stated in the manual page.
 * - There is some confusion over whether the set of undo adjustments
 *   to be performed at exit should be done in an atomic manner.
 *   That is, if we are attempting to decrement the semval should we queue
 *   up and wait until we can do so legally?
 *   The original implementation attempted to do this.
 *   The current implementation does not do so. This is because I don't
 *   think it is the right thing (TM) to do, and because I couldn't
 *   see a clean way to get the old behavior with the new design.
 *   The POSIX standard and SVID should be consulted to determine
 *   what behavior is mandated.
 *
 * Further notes on refinement (Christoph Rohland, December 1998):
 * - The POSIX standard says that the undo adjustments should simply be
 *   redone. So the current implementation is OK.
 * - The previous code had two flaws:
 *   1) It actively gave the semaphore to the next waiting process
 *      sleeping on the semaphore. Since this process did not have the
 *      cpu, this led to many unnecessary context switches and bad
 *      performance. Now we only check which process should be able to
 *      get the semaphore, and if this process wants to reduce some
 *      semaphore value we simply wake it up without doing the
 *      operation. So it has to try to get it later. Thus e.g. the
 *      running process may reacquire the semaphore during the current
 *      time slice. If it only waits for zero or increases the semaphore,
 *      we do the operation in advance and wake it up.
 *   2) It did not wake up all zero waiting processes. We try to do
 *      better but only get the semops right which only wait for zero or
 *      increase. If there are decrement operations in the operations
 *      array we do the same as before.
 *
 * With the incarnation of the O(1) scheduler, it became unnecessary to
 * perform the check/retry algorithm for waking up blocked processes, as
 * the new scheduler is better at handling thread switches than the old one.
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc <alan@redhat.com>
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 */
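/*
 * For illustration, a minimal userspace view of two behaviors documented
 * above: semop() returning 0 on success, and SEM_UNDO adjustments being
 * applied automatically at exit. This is a hedged sketch, not part of the
 * kernel build; the key, mode and error handling are illustrative only.
 *
 *	#include <stdio.h>
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	int main(void)
 *	{
 *		// one private semaphore; Linux initializes semval to 0
 *		int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *		struct sembuf op = { .sem_num = 0, .sem_op = 1,
 *				     .sem_flg = SEM_UNDO };
 *
 *		if (semid < 0) {
 *			perror("semget");
 *			return 1;
 *		}
 *		// returns 0 on success, not the value of the last
 *		// semaphore element examined
 *		if (semop(semid, &op, 1) != 0)
 *			perror("semop");
 *		// at process exit, exit_sem() would apply semadj = -1,
 *		// restoring semval to 0
 *		semctl(semid, 0, IPC_RMID);
 *		return 0;
 *	}
 */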
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/smp_lock.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <asm/uaccess.h>
#include "util.h"
#define sem_lock(id)	((struct sem_array*)ipc_lock(&sem_ids,id))
#define sem_unlock(sma)	ipc_unlock(&(sma)->sem_perm)
#define sem_rmid(id)	((struct sem_array*)ipc_rmid(&sem_ids,id))
#define sem_checkid(sma, semid)	\
	ipc_checkid(&sem_ids,&sma->sem_perm,semid)
#define sem_buildid(id, seq) \
	ipc_buildid(&sem_ids, id, seq)
static struct ipc_ids sem_ids;

static int newary (key_t, int, int);
static void freeary (struct sem_array *sma, int id);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset,
				 int length, int *eof, void *data);
#endif
#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
/*
 * linked list protection:
 *	sem_array.sem_pending{,last},
 *	sem_array.sem_undo: sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 */
int sem_ctls[4] = {SEMMSL, SEMMNS, SEMOPM, SEMMNI};
#define sc_semmsl	(sem_ctls[0])
#define sc_semmns	(sem_ctls[1])
#define sc_semopm	(sem_ctls[2])
#define sc_semmni	(sem_ctls[3])

static int used_sems;
void __init sem_init (void)
{
	ipc_init_ids(&sem_ids, sc_semmni);
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("sysvipc/sem", 0, NULL,
			       sysvipc_sem_read_proc, NULL);
#endif
}
/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *	* unlinking the queue entry from sma->sem_pending
 *	* setting queue.status to IN_WAKEUP
 *	  This is the notification for the blocked thread that a
 *	  result value is imminent.
 *	* calling wake_up_process
 *	* setting queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *	* if it's IN_WAKEUP, then it must wait until the value changes
 *	* if it's not -EINTR, then the operation was completed by
 *	  update_queue. semtimedop can return queue.status without
 *	  performing any operation on the semaphore array.
 *	* otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status.
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 */
#define IN_WAKEUP	1
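/*
 * A standalone sketch of the two-stage handshake described above, using
 * C11 atomics and a busy-wait in place of wake_up_process() and the
 * scheduler; IN_WAKEUP_DEMO and the thread setup are illustrative, not
 * kernel interfaces.
 *
 *	#include <stdatomic.h>
 *	#include <pthread.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	#define IN_WAKEUP_DEMO	1		// stand-in for IN_WAKEUP
 *
 *	static atomic_int status = -EINTR;	// as before blocking
 *
 *	static void *waker(void *arg)
 *	{
 *		// stage 1: announce that the final result is imminent
 *		atomic_store(&status, IN_WAKEUP_DEMO);
 *		// (the kernel calls wake_up_process() here)
 *		// stage 2: publish the final result
 *		atomic_store(&status, 0);
 *		return NULL;
 *	}
 *
 *	int main(void)
 *	{
 *		pthread_t t;
 *		int error;
 *
 *		pthread_create(&t, NULL, waker, NULL);
 *		// the sleeper: spin past the IN_WAKEUP window, just as
 *		// the loop in sys_semtimedop() below does
 *		do {
 *			error = atomic_load(&status);
 *		} while (error == -EINTR || error == IN_WAKEUP_DEMO);
 *		printf("final status: %d\n", error);	// prints 0
 *		pthread_join(t, NULL);
 *		return 0;
 *	}
 */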
static int newary (key_t key, int nsems, int semflg)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;

	if (!nsems)
		return -EINVAL;
	if (used_sems + nsems > sc_semmns)
		return -ENOSPC;

	size = sizeof (*sma) + nsems * sizeof (struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma)
		return -ENOMEM;
	memset (sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma);
		return retval;
	}

	id = ipc_addid(&sem_ids, &sma->sem_perm, sc_semmni);
	if (id == -1) {
		security_sem_free(sma);
		ipc_rcu_putref(sma);
		return -ENOSPC;
	}
	used_sems += nsems;

	sma->sem_base = (struct sem *) &sma[1];
	/* sma->sem_pending = NULL; */
	sma->sem_pending_last = &sma->sem_pending;
	/* sma->undo = NULL; */
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma);

	return sem_buildid(id, sma->sem_perm.seq);
}
asmlinkage long sys_semget (key_t key, int nsems, int semflg)
{
	int id, err = -EINVAL;
	struct sem_array *sma;

	if (nsems < 0 || nsems > sc_semmsl)
		return -EINVAL;
	down(&sem_ids.sem);

	if (key == IPC_PRIVATE) {
		err = newary(key, nsems, semflg);
	} else if ((id = ipc_findkey(&sem_ids, key)) == -1) {  /* key not used */
		if (!(semflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newary(key, nsems, semflg);
	} else if (semflg & IPC_CREAT && semflg & IPC_EXCL) {
		err = -EEXIST;
	} else {
		sma = sem_lock(id);
		if (nsems > sma->sem_nsems)
			err = -EINVAL;
		else if (ipcperms(&sma->sem_perm, semflg))
			err = -EACCES;
		else {
			int semid = sem_buildid(id, sma->sem_perm.seq);
			err = security_sem_associate(sma, semflg);
			if (!err)
				err = semid;
		}
		sem_unlock(sma);
	}

	up(&sem_ids.sem);
	return err;
}
/* Manage the doubly linked list sma->sem_pending as a FIFO:
 * insert new queue elements at the tail sma->sem_pending_last.
 */
static inline void append_to_queue (struct sem_array * sma,
				    struct sem_queue * q)
{
	*(q->prev = sma->sem_pending_last) = q;
	*(sma->sem_pending_last = &q->next) = NULL;
}

static inline void prepend_to_queue (struct sem_array * sma,
				     struct sem_queue * q)
{
	q->next = sma->sem_pending;
	*(q->prev = &sma->sem_pending) = q;
	if (q->next)
		q->next->prev = &q->next;
	else /* sma->sem_pending_last == &sma->sem_pending */
		sma->sem_pending_last = &q->next;
}

static inline void remove_from_queue (struct sem_array * sma,
				      struct sem_queue * q)
{
	*(q->prev) = q->next;
	if (q->next)
		q->next->prev = q->prev;
	else /* sma->sem_pending_last == &q->next */
		sma->sem_pending_last = q->prev;
	q->prev = NULL; /* mark as removed */
}
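/*
 * The tail pointer sma->sem_pending_last is a pointer-to-pointer, so
 * append_to_queue() needs no empty-list special case. A hedged userspace
 * sketch of the same technique (struct names are illustrative, not the
 * kernel's):
 *
 *	#include <assert.h>
 *	#include <stddef.h>
 *
 *	struct node {
 *		struct node *next;
 *		struct node **prev;	// points at whatever points at us
 *	};
 *
 *	struct fifo {
 *		struct node *head;
 *		struct node **tail;	// == &head while the list is empty
 *	};
 *
 *	static void fifo_append(struct fifo *f, struct node *n)
 *	{
 *		*(n->prev = f->tail) = n;
 *		*(f->tail = &n->next) = NULL;
 *	}
 *
 *	static void fifo_remove(struct fifo *f, struct node *n)
 *	{
 *		*(n->prev) = n->next;
 *		if (n->next)
 *			n->next->prev = n->prev;
 *		else			// n was the tail
 *			f->tail = n->prev;
 *		n->prev = NULL;		// mark as removed
 *	}
 *
 *	int main(void)
 *	{
 *		struct fifo f = { .head = NULL, .tail = &f.head };
 *		struct node a = {0}, b = {0};
 *
 *		fifo_append(&f, &a);
 *		fifo_append(&f, &b);
 *		fifo_remove(&f, &a);	// unlink without scanning
 *		assert(f.head == &b && f.tail == &b.next);
 *		return 0;
 *	}
 */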
/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if need to sleep, else return error code.
 */
static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 * Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	sma->sem_otime = get_seconds();
	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}
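/*
 * The contract enforced above is all-or-nothing: either every operation
 * in the array is applied, or every partial update is rolled back. A
 * hedged userspace sketch (error handling trimmed):
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		// two semaphores; Linux initializes both semvals to 0
 *		int semid = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);
 *		struct sembuf ops[2] = {
 *			{ .sem_num = 0, .sem_op = 1, .sem_flg = 0 },
 *			// would block: semaphore 1 is 0 and we decrement
 *			{ .sem_num = 1, .sem_op = -1,
 *			  .sem_flg = IPC_NOWAIT },
 *		};
 *
 *		// fails with EAGAIN, and semaphore 0 stays 0: the
 *		// increment was undone by the rollback loop above
 *		if (semop(semid, ops, 2) != 0)
 *			perror("semop");
 *		printf("sem 0 = %d\n", semctl(semid, 0, GETVAL));
 *		semctl(semid, 0, IPC_RMID);
 *		return 0;
 *	}
 */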
/* Go through the pending queue for the indicated semaphore
 * looking for tasks that can be completed.
 */
static void update_queue (struct sem_array * sma)
{
	int error;
	struct sem_queue * q;

	q = sma->sem_pending;
	while (q) {
		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error <= 0) {
			struct sem_queue *n;
			remove_from_queue(sma,q);
			q->status = IN_WAKEUP;
			/*
			 * Continue scanning. The next operation
			 * that must be checked depends on the type of the
			 * completed operation:
			 * - if the operation modified the array, then
			 *   restart from the head of the queue and
			 *   check for threads that might be waiting
			 *   for semaphore values to become 0.
			 * - if the operation didn't modify the array,
			 *   then just continue.
			 */
			if (q->alter)
				n = sma->sem_pending;
			else
				n = q->next;
			wake_up_process(q->sleeper);
			/* hands-off: q will disappear immediately after
			 * writing q->status.
			 */
			q->status = error;
			q = n;
		} else {
			q = q->next;
		}
	}
}
/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * guarantee that semncnt+semzcnt>0 if the task is on the pending queue.
 */
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
	int semncnt;
	struct sem_queue * q;

	semncnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}

static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue * q;

	semzcnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}
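/*
 * These approximations are what userspace sees through semctl() GETNCNT
 * and GETZCNT; a brief hedged sketch:
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *
 *		// both print 0 here: no task is blocked on this set
 *		printf("semncnt = %d\n", semctl(semid, 0, GETNCNT));
 *		printf("semzcnt = %d\n", semctl(semid, 0, GETZCNT));
 *		semctl(semid, 0, IPC_RMID);
 *		return 0;
 *	}
 */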
/* Free a semaphore set. freeary() is called with sem_ids.sem locked and
 * the spinlock for this semaphore set held. sem_ids.sem remains locked
 * on exit.
 */
static void freeary (struct sem_array *sma, int id)
{
	struct sem_undo *un;
	struct sem_queue *q;
	int size;

	/* Invalidate the existing undo structures for this semaphore set.
	 * (They will be freed without any further action in exit_sem()
	 * or during the next semop.)
	 */
	for (un = sma->undo; un; un = un->id_next)
		un->semid = -1;

	/* Wake up all pending processes and let them fail with EIDRM. */
	q = sma->sem_pending;
	while (q) {
		struct sem_queue *n;
		/* lazy remove_from_queue: we are killing the whole queue */
		n = q->next;
		q->status = IN_WAKEUP;
		wake_up_process(q->sleeper); /* doesn't sleep */
		q->status = -EIDRM;	/* hands-off q */
		q = n;
	}

	/* Remove the semaphore set from the ID array */
	sma = sem_rmid(id);
	sem_unlock(sma);

	used_sems -= sma->sem_nsems;
	size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem);
	security_sem_free(sma);
	ipc_rcu_putref(sma);
}
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
static int semctl_nolock(int semid, int semnum, int cmd, int version, union semun arg)
{
	int err = -EINVAL;
	struct sem_array *sma;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo,0,sizeof(seminfo));
		seminfo.semmni = sc_semmni;
		seminfo.semmns = sc_semmns;
		seminfo.semmsl = sc_semmsl;
		seminfo.semopm = sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down(&sem_ids.sem);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids.in_use;
			seminfo.semaem = used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = sem_ids.max_id;
		up(&sem_ids.sem);
		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0: max_id;
	}
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id;

		if(semid >= sem_ids.entries->size)
			return -EINVAL;

		memset(&tbuf,0,sizeof(tbuf));

		sma = sem_lock(semid);
		if(sma == NULL)
			return -EINVAL;

		err = -EACCES;
		if (ipcperms (&sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		id = sem_buildid(semid, sma->sem_perm.seq);

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime  = sma->sem_otime;
		tbuf.sem_ctime  = sma->sem_ctime;
		tbuf.sem_nsems  = sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
	return err;
out_unlock:
	sem_unlock(sma);
	return err;
}
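/*
 * The IPC_INFO/SEM_INFO path above is how tools such as ipcs read the
 * limits. A hedged userspace sketch; note that on Linux the caller must
 * define union semun itself:
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *	#include <stdio.h>
 *
 *	union semun {
 *		int val;
 *		struct semid_ds *buf;
 *		unsigned short *array;
 *		struct seminfo *__buf;
 *	};
 *
 *	int main(void)
 *	{
 *		struct seminfo info;
 *		union semun arg = { .__buf = &info };
 *
 *		// on success returns the highest in-use index (max_id)
 *		if (semctl(0, 0, IPC_INFO, arg) < 0) {
 *			perror("semctl(IPC_INFO)");
 *			return 1;
 *		}
 *		printf("semmsl=%d semmns=%d semopm=%d semmni=%d\n",
 *		       info.semmsl, info.semmns, info.semopm, info.semmni);
 *		return 0;
 *	}
 */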
static int semctl_main(int semid, int semnum, int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem *curr;
	int err;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	int nsems;

	sma = sem_lock(semid);
	if (sma == NULL)
		return -EINVAL;

	nsems = sma->sem_nsems;

	err = -EIDRM;
	if (sem_checkid(sma,semid))
		goto out_unlock;

	err = -EACCES;
	if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
		goto out_unlock;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = arg.array;
		int i;

		if(nsems > SEMMSL_FAST) {
			ipc_rcu_getref(sma);
			sem_unlock(sma);

			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				return -ENOMEM;
			}

			ipc_lock_by_ptr(&sma->sem_perm);
			ipc_rcu_putref(sma);
			if (sma->sem_perm.deleted) {
				sem_unlock(sma);
				err = -EIDRM;
				goto out_free;
			}
		}

		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma);
		err = 0;
		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		ipc_rcu_getref(sma);
		sem_unlock(sma);

		if(nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				return -ENOMEM;
			}
		}

		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
			ipc_lock_by_ptr(&sma->sem_perm);
			ipc_rcu_putref(sma);
			sem_unlock(sma);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				err = -ERANGE;
				goto out_free;
			}
		}
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		if (sma->sem_perm.deleted) {
			sem_unlock(sma);
			err = -EIDRM;
			goto out_free;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];
		for (un = sma->undo; un; un = un->id_next)
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	case IPC_STAT:
	{
		struct semid64_ds tbuf;
		memset(&tbuf,0,sizeof(tbuf));
		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime  = sma->sem_otime;
		tbuf.sem_ctime  = sma->sem_ctime;
		tbuf.sem_nsems  = sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return 0;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
	}
	err = -EINVAL;
	if(semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;
		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		for (un = sma->undo; un; un = un->id_next)
			un->semadj[semnum] = 0;
		curr->semval = val;
		curr->sempid = current->tgid;
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	}
out_unlock:
	sem_unlock(sma);
out_free:
	if(sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}
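/*
 * A hedged userspace sketch of the GETALL/SETALL paths handled above
 * (union semun again being caller-defined on Linux):
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *	#include <stdio.h>
 *
 *	union semun {
 *		int val;
 *		struct semid_ds *buf;
 *		unsigned short *array;
 *	};
 *
 *	int main(void)
 *	{
 *		unsigned short vals[2] = { 3, 7 };
 *		union semun arg = { .array = vals };
 *		int semid = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);
 *
 *		// SETALL writes every semval and clears all undo
 *		// adjustments, as in the loop over sma->undo above
 *		semctl(semid, 0, SETALL, arg);
 *		vals[0] = vals[1] = 0;
 *		semctl(semid, 0, GETALL, arg);
 *		printf("%hu %hu\n", vals[0], vals[1]);	// prints "3 7"
 *		semctl(semid, 0, IPC_RMID);
 *		return 0;
 *	}
 */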
static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct semid64_ds tbuf;

		if(copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.sem_perm.uid;
		out->gid	= tbuf.sem_perm.gid;
		out->mode	= tbuf.sem_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.sem_perm.uid;
		out->gid	= tbuf_old.sem_perm.gid;
		out->mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
static int semctl_down(int semid, int semnum, int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	int err;
	struct sem_setbuf setbuf;
	struct kern_ipc_perm *ipcp;

	if(cmd == IPC_SET) {
		if(copy_semid_from_user (&setbuf, arg.buf, version))
			return -EFAULT;
		if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode)))
			return err;
	}
	sma = sem_lock(semid);
	if (sma == NULL)
		return -EINVAL;

	if (sem_checkid(sma,semid)) {
		err = -EIDRM;
		goto out_unlock;
	}
	ipcp = &sma->sem_perm;

	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) {
		err = -EPERM;
		goto out_unlock;
	}

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch(cmd){
	case IPC_RMID:
		freeary(sma, semid);
		err = 0;
		break;
	case IPC_SET:
		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
				| (setbuf.mode & S_IRWXUGO);
		sma->sem_ctime = get_seconds();
		sem_unlock(sma);
		err = 0;
		break;
	default:
		sem_unlock(sma);
		err = -EINVAL;
		break;
	}
	return err;

out_unlock:
	sem_unlock(sma);
	return err;
}
asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
{
	int err = -EINVAL;
	int version;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case SEM_STAT:
		err = semctl_nolock(semid,semnum,cmd,version,arg);
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case IPC_STAT:
	case SETVAL:
	case SETALL:
		err = semctl_main(semid,semnum,cmd,version,arg);
		return err;
	case IPC_RMID:
	case IPC_SET:
		down(&sem_ids.sem);
		err = semctl_down(semid,semnum,cmd,version,arg);
		up(&sem_ids.sem);
		return err;
	default:
		return -EINVAL;
	}
}
static inline void lock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
		spin_lock(&undo_list->lock);
}
/* This code has an interaction with copy_semundo().
 * Consider: two tasks are sharing the undo_list. task1
 * acquires the undo_list lock in lock_semundo(). If task2 now
 * exits before task1 releases the lock (by calling
 * unlock_semundo()), then task1 will never call spin_unlock().
 * This leaves the sem_undo_list in a locked state. If task1 now creates
 * task3 and once again shares the sem_undo_list, the sem_undo_list will
 * still be locked, and future SEM_UNDO operations will deadlock. This
 * case is dealt with in copy_semundo() by having it reinitialize the
 * spin lock when the refcnt goes from 1 to 2.
 */
static inline void unlock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
		spin_unlock(&undo_list->lock);
}
/* If the task doesn't already have an undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
 * and current is THE ONE.
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;
	int size;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		size = sizeof(struct sem_undo_list);
		undo_list = (struct sem_undo_list *) kmalloc(size, GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		memset(undo_list, 0, size);
		/* don't initialize undo_list->lock here. It's done
		 * in copy_semundo() instead.
		 */
		atomic_set(&undo_list->refcnt, 1);
		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}
static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo **last, *un;

	last = &ulp->proc_list;
	un = *last;
	while (un != NULL) {
		if (un->semid == semid)
			break;
		if (un->semid == -1) {
			/* set invalidated by freeary(): free the stale entry */
			*last = un->proc_next;
			kfree(un);
		} else {
			last = &un->proc_next;
		}
		un = *last;
	}
	return un;
}
static struct sem_undo *find_undo(int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems;
	int error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	lock_semundo();
	un = lookup_undo(ulp, semid);
	unlock_semundo();
	if (likely(un!=NULL))
		goto out;

	/* no undo structure around - allocate one. */
	sma = sem_lock(semid);
	un = ERR_PTR(-EINVAL);
	if (sma == NULL)
		goto out;
	un = ERR_PTR(-EIDRM);
	if (sem_checkid(sma,semid)) {
		sem_unlock(sma);
		goto out;
	}
	nsems = sma->sem_nsems;
	ipc_rcu_getref(sma);
	sem_unlock(sma);

	new = (struct sem_undo *) kmalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		sem_unlock(sma);
		return ERR_PTR(-ENOMEM);
	}
	memset(new, 0, sizeof(struct sem_undo) + sizeof(short)*nsems);
	new->semadj = (short *) &new[1];
	new->semid = semid;

	lock_semundo();
	un = lookup_undo(ulp, semid);
	if (un) {
		unlock_semundo();
		kfree(new);
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		sem_unlock(sma);
		goto out;
	}
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma);
		unlock_semundo();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	new->proc_next = ulp->proc_list;
	ulp->proc_list = new;
	new->id_next = sma->undo;
	sma->undo = new;
	sem_unlock(sma);
	un = new;
	unlock_semundo();
out:
	return un;
}
asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
			unsigned nsops, const struct timespec __user *timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > sc_semopm)
		return -E2BIG;
	if(nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
		if(sops==NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

retry_undos:
	if (undos) {
		un = find_undo(semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else
		un = NULL;

	sma = sem_lock(semid);
	error = -EINVAL;
	if(sma==NULL)
		goto out_free;
	error = -EIDRM;
	if (sem_checkid(sma,semid))
		goto out_unlock_free;
	/*
	 * semid identifiers are not unique - find_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array with the same id got created. Check and retry.
	 */
	if (un && un->semid == -1) {
		sem_unlock(sma);
		goto retry_undos;
	}
	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_unlock_free;

	error = -EACCES;
	if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_unlock_free;

	error = try_atomic_semop (sma, sops, nsops, un, current->tgid);
	if (error <= 0) {
		if (alter && error == 0)
			update_queue(sma);
		goto out_unlock_free;
	}

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */

	queue.sma = sma;
	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = current->tgid;
	queue.id = semid;
	queue.alter = alter;
	if (alter)
		append_to_queue(sma ,&queue);
	else
		prepend_to_queue(sma ,&queue);

	queue.status = -EINTR;
	queue.sleeper = current;
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma);

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = queue.status;
	while(unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = queue.status;
	}

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources */
		goto out_free;
	}

	sma = sem_lock(semid);
	if(sma==NULL) {
		if(queue.prev != NULL)
			BUG();
		error = -EIDRM;
		goto out_free;
	}

	/*
	 * If queue.status != -EINTR we are woken up by another process
	 */
	error = queue.status;
	if (error != -EINTR) {
		goto out_unlock_free;
	}

	/*
	 * If an interrupt occurred we have to clean up the queue
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;
	remove_from_queue(sma,&queue);
	goto out_unlock_free;

out_unlock_free:
	sem_unlock(sma);
out_free:
	if(sops != fast_sops)
		kfree(sops);
	return error;
}
asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
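/*
 * A hedged userspace sketch of the timeout path: semtimedop() behaves
 * like semop() but gives up with EAGAIN once the timeout elapses (glibc
 * exposes it under _GNU_SOURCE):
 *
 *	#define _GNU_SOURCE
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *	#include <time.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *		struct sembuf op = { .sem_num = 0, .sem_op = -1,
 *				     .sem_flg = 0 };
 *		struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *		// semval is 0, so this blocks; after about one second
 *		// the kernel removes us from the pending queue and the
 *		// call fails with EAGAIN
 *		if (semtimedop(semid, &op, 1, &ts) != 0)
 *			perror("semtimedop");
 *		semctl(semid, 0, IPC_RMID);
 *		return 0;
 *	}
 */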
/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 *
 * See the notes above unlock_semundo() regarding the spin_lock_init()
 * in this code. Initialize the undo_list->lock here instead of in
 * get_undo_list() because of the reasoning in the comment above
 * unlock_semundo().
 */
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		if (atomic_read(&undo_list->refcnt) == 1)
			spin_lock_init(&undo_list->lock);
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}
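/*
 * For context, a hedged sketch of how userspace opts into this sharing
 * with clone(); the stack size and flags are illustrative only:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/wait.h>
 *
 *	static int child(void *arg)
 *	{
 *		// this task shares the parent's sem_undo_list; SEM_UNDO
 *		// adjustments are applied once, when the last sharer exits
 *		return 0;
 *	}
 *
 *	int main(void)
 *	{
 *		char *stack = malloc(64 * 1024);
 *
 *		// CLONE_SYSVSEM drives the refcnt/spin_lock_init logic
 *		// in copy_semundo() above
 *		pid_t pid = clone(child, stack + 64 * 1024,
 *				  CLONE_SYSVSEM | SIGCHLD, NULL);
 *		if (pid < 0)
 *			perror("clone");
 *		else
 *			waitpid(pid, NULL, 0);
 *		free(stack);
 *		return 0;
 *	}
 */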
/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	struct sem_undo *u, **up;

	undo_list = tsk->sysvsem.undo_list;
	if (!undo_list)
		return;

	if (!atomic_dec_and_test(&undo_list->refcnt))
		return;

	/* There's no need to hold the semundo list lock, as current
	 * is the last task exiting for this undo list.
	 */
	for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) {
		struct sem_array *sma;
		int nsems, i;
		struct sem_undo *un, **unp;
		int semid;

		semid = u->semid;

		if (semid == -1)
			continue;
		sma = sem_lock(semid);
		if (sma == NULL)
			continue;

		if (u->semid == -1)
			goto next_entry;

		BUG_ON(sem_checkid(sma,u->semid));

		/* remove u from the sma->undo list */
		for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
			if (u == un)
				goto found;
		}
		printk ("exit_sem undo list error id=%d\n", u->semid);
		goto next_entry;
found:
		*unp = un->id_next;
		/* perform adjustments registered in u */
		nsems = sma->sem_nsems;
		for (i = 0; i < nsems; i++) {
			struct sem * sem = &sma->sem_base[i];
			if (u->semadj[i]) {
				sem->semval += u->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by sus:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (sem->semval < 0)
					sem->semval = 0;
				if (sem->semval > SEMVMX)
					sem->semval = SEMVMX;
				sem->sempid = current->tgid;
			}
		}
		sma->sem_otime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
next_entry:
		sem_unlock(sma);
	}
	kfree(undo_list);
}
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t begin = 0;
	int i, len = 0;

	len += sprintf(buffer, "       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n");
	down(&sem_ids.sem);

	for(i = 0; i <= sem_ids.max_id; i++) {
		struct sem_array *sma;
		sma = sem_lock(i);
		if (sma) {
			len += sprintf(buffer + len, "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
				sma->sem_perm.key,
				sem_buildid(i,sma->sem_perm.seq),
				sma->sem_perm.mode,
				sma->sem_nsems,
				sma->sem_perm.uid,
				sma->sem_perm.gid,
				sma->sem_perm.cuid,
				sma->sem_perm.cgid,
				sma->sem_otime,
				sma->sem_ctime);
			sem_unlock(sma);

			pos += len;
			if(pos < offset) {
				len = 0;
				begin = pos;
			}
			if(pos > offset + length)
				goto done;
		}
	}
	*eof = 1;
done:
	up(&sem_ids.sem);
	*start = buffer + (offset - begin);
	len -= (offset - begin);
	if(len > length)
		len = length;
	if(len < 0)
		len = 0;
	return len;
}
#endif