/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"
int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS	(CONFIG_BASE_SMALL ? 4 : 8)
/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};
/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list
 * via the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
};
/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32 *)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}
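/*
 * Illustrative sketch, compiled out: how a PROCESS_PRIVATE futex ends up in a
 * bucket. The key layout mirrors the private-futex path of get_futex_key()
 * below; example_private_bucket() is a hypothetical helper, not used anywhere.
 */
#if 0
static struct futex_hash_bucket *example_private_bucket(u32 __user *uaddr)
{
	union futex_key key = FUTEX_KEY_INIT;
	unsigned long address = (unsigned long)uaddr;

	/* offset within the page plus (mm, page-aligned address) identify it */
	key.both.offset = address % PAGE_SIZE;
	key.private.mm = current->mm;
	key.private.address = address - key.both.offset;

	return hash_futex(&key);
}
#endif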
/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}
/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		atomic_inc(&key->shared.inode->i_count);
		break;
	case FUT_OFF_MMSHARED:
		atomic_inc(&key->private.mm->mm_count);
		break;
	}
}
/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}
/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: VERIFY_READ,
 *		VERIFY_WRITE)
 *
 * Returns a negative error code or 0
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page;
	int err;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we don't even have to find the underlying vma.
	 * Note: We do have to check 'uaddr' is a valid user address,
	 *       but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
			return -EFAULT;
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);
		return 0;
	}

again:
	err = get_user_pages_fast(address, 1, 1, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == VERIFY_READ)
		err = get_user_pages_fast(address, 1, 0, &page);
	if (err < 0)
		return err;

	page = compound_head(page);
	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		put_page(page);
		/*
		 * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
		 * trying to find one. RW mapping would have COW'd (and thus
		 * have a mapping) so this page is RO and won't ever change.
		 */
		if ((page == ZERO_PAGE(address)))
			return -EFAULT;
		goto again;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;
	} else {
		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = page->mapping->host;
		key->shared.pgoff = page->index;
	}

	get_futex_key_refs(key);

	unlock_page(page);
	put_page(page);
	return 0;
}
static inline
void put_futex_key(int fshared, union futex_key *key)
{
	drop_futex_key_refs(key);
}
/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, (unsigned long)uaddr,
			     1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}
/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}
static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 curval;

	pagefault_disable();
	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
	pagefault_enable();

	return curval;
}
static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}
static struct futex_pi_state * alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}
static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}
/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p)
		get_task_struct(p);
	rcu_read_unlock();

	return p;
}
/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
	}
	spin_unlock_irq(&curr->pi_lock);
}
static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	struct plist_head *head;
	struct task_struct *p;
	pid_t pid = uval & FUTEX_TID_MASK;

	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, key)) {
			/*
			 * Another waiter already exists - bump up
			 * the refcount and return its pi_state:
			 */
			pi_state = this->pi_state;
			/*
			 * Userspace might have messed up non PI and PI futexes
			 */
			if (unlikely(!pi_state))
				return -EINVAL;

			WARN_ON(!atomic_read(&pi_state->refcount));

			/*
			 * When pi_state->owner is NULL then the owner died
			 * and another waiter is on the fly. pi_state->owner
			 * is fixed up by the task which acquires
			 * pi_state->rt_mutex.
			 *
			 * We do not check for pid == 0 which can happen when
			 * the owner died and robust_list_exit() cleared the
			 * TID.
			 */
			if (pid && pi_state->owner) {
				/*
				 * Bail out if user space manipulated the
				 * futex value.
				 */
				if (pid != task_pid_vnr(pi_state->owner))
					return -EINVAL;
			}

			atomic_inc(&pi_state->refcount);
			*ps = pi_state;

			return 0;
		}
	}

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (!p)
		return -ESRCH;

	/*
	 * We need to look at the task state flags to figure out,
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make 'p'
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}
/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for.  This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Returns:
 *  0 - ready to wait
 *  1 - acquired the lock
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	int lock_taken, ret, ownerdied = 0;
	u32 uval, newval, curval;

retry:
	ret = lock_taken = 0;

	/*
	 * To avoid races, we attempt to take the lock here again
	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
	 * the locks. It will most likely not succeed.
	 */
	newval = task_pid_vnr(task);
	if (set_waiters)
		newval |= FUTEX_WAITERS;

	curval = cmpxchg_futex_value_locked(uaddr, 0, newval);

	if (unlikely(curval == -EFAULT))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task))))
		return -EDEADLK;

	/*
	 * Surprise - we got the lock. Just return to userspace:
	 */
	if (unlikely(!curval))
		return 1;

	uval = curval;

	/*
	 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
	 * to wake at the next unlock.
	 */
	newval = curval | FUTEX_WAITERS;

	/*
	 * There are two cases, where a futex might have no owner (the
	 * owner TID is 0): OWNER_DIED. We take over the futex in this
	 * case. We also do an unconditional take over, when the owner
	 * of the futex died.
	 *
	 * This is safe as we are protected by the hash bucket lock !
	 */
	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
		/* Keep the OWNER_DIED bit */
		newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
		ownerdied = 0;
		lock_taken = 1;
	}

	curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

	if (unlikely(curval == -EFAULT))
		return -EFAULT;
	if (unlikely(curval != uval))
		goto retry;

	/*
	 * We took the lock due to owner died take over.
	 */
	if (unlikely(lock_taken))
		return 1;

	/*
	 * We don't have the lock. Look up the PI state (or create it if
	 * we are the first waiter):
	 */
	ret = lookup_pi_state(uval, hb, key, ps);

	if (unlikely(ret)) {
		switch (ret) {
		case -ESRCH:
			/*
			 * No owner found for this futex. Check if the
			 * OWNER_DIED bit is set to figure out whether
			 * this is a robust futex or not.
			 */
			if (get_futex_value_locked(&curval, uaddr))
				return -EFAULT;

			/*
			 * We simply start over in case of a robust
			 * futex. The code above will take the futex
			 * and return happy.
			 */
			if (curval & FUTEX_OWNER_DIED) {
				ownerdied = 1;
				goto retry;
			}
		default:
			break;
		}
	}

	return ret;
}
/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	struct task_struct *p = q->task;

	/*
	 * We set q->lock_ptr = NULL _before_ we wake up the task. If
	 * a non futex wake up happens on another CPU then the task
	 * might exit and p would dereference a non existing task
	 * struct. Prevent this by holding a reference on p across the
	 * wake up.
	 */
	get_task_struct(p);

	plist_del(&q->list, &q->list.plist);
	/*
	 * The waiting task can free the futex_q as soon as
	 * q->lock_ptr = NULL is written, without taking any locks. A
	 * memory barrier is required here to prevent the following
	 * store to lock_ptr from getting ahead of the plist_del.
	 */
	smp_wmb();
	q->lock_ptr = NULL;

	wake_up_state(p, TASK_NORMAL);
	put_task_struct(p);
}
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 curval, newval;

	if (!pi_state)
		return -EINVAL;

	/*
	 * If current does not own the pi_state then the futex is
	 * inconsistent and user space fiddled with the futex value.
	 */
	if (pi_state->owner != current)
		return -EINVAL;

	spin_lock(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * This happens when we have stolen the lock and the original
	 * pending owner did not enqueue itself back on the rt_mutex.
	 * That's not a tragedy. We know that way that a lock waiter
	 * is on the fly. We make the futex_q waiter the pending owner.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. (The WAITERS bit is always
	 * kept enabled while there is PI state around. We must also
	 * preserve the owner died bit.)
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		int ret = 0;

		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			ret = -EFAULT;
		else if (curval != uval)
			ret = -EINVAL;
		if (ret) {
			spin_unlock(&pi_state->pi_mutex.wait_lock);
			return ret;
		}
	}

	spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	spin_unlock_irq(&pi_state->owner->pi_lock);

	spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	spin_unlock_irq(&new_owner->pi_lock);

	spin_unlock(&pi_state->pi_mutex.wait_lock);
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}
static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 oldval;

	/*
	 * There is no waiter, so we unlock the futex. The owner-died
	 * bit need not be preserved here. We are the owner:
	 */
	oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

	if (oldval == -EFAULT)
		return oldval;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}
/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}
/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex (&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	put_futex_key(fshared, &key);
out:
	return ret;
}
/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head;
	struct futex_q *this, *next;
	int ret, op_ret;

retry:
	ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

		ret = fault_in_user_writeable(uaddr2);
		if (ret)
			goto out_put_keys;

		put_futex_key(fshared, &key2);
		put_futex_key(fshared, &key1);
		goto retry;
	}

	head = &hb1->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex (&this->key, &key1)) {
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		head = &hb2->chain;

		op_ret = 0;
		plist_for_each_entry_safe(this, next, head, list) {
			if (match_futex (&this->key, &key2)) {
				wake_futex(this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

	double_unlock_hb(hb1, hb2);
out_put_keys:
	put_futex_key(fshared, &key2);
out_put_key1:
	put_futex_key(fshared, &key1);
out:
	return ret;
}
/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
		q->list.plist.lock = &hb2->lock;
#endif
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}
/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later.  Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	WARN_ON(plist_node_empty(&q->list));
	plist_del(&q->list, &q->list.plist);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;
#ifdef CONFIG_DEBUG_PI_LIST
	q->list.plist.lock = &hb->lock;
#endif

	wake_up_state(q->task, TASK_NORMAL);
}
/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Returns:
 *  0 - failed to acquire the lock atomically
 *  1 - acquired the lock
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
				      struct futex_hash_bucket *hb1,
				      struct futex_hash_bucket *hb2,
				      union futex_key *key1, union futex_key *key2,
				      struct futex_pi_state **ps, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault.  If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to
	 * enter the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1.  The pi_state is returned
	 * in ps in contended cases.
	 */
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
	if (ret == 1)
		requeue_pi_wake_futex(top_waiter, key2, hb2);

	return ret;
}
/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:	source futex user address
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
 *		pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Returns:
 * >=0 - on success, the number of tasks requeued or woken
 *  <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
			 int nr_wake, int nr_requeue, u32 *cmpval,
			 int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head1;
	struct futex_q *this, *next;
	u32 curval2;

	if (requeue_pi) {
		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner. However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state. Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}

retry:
	if (pi_state != NULL) {
		/*
		 * We will have to lookup the pi_state again, so free this one
		 * to keep the accounting correct.
		 */
		free_pi_state(pi_state);
		pi_state = NULL;
	}

	ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2,
			    requeue_pi ? VERIFY_WRITE : VERIFY_READ);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			double_unlock_hb(hb1, hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				goto out_put_keys;

			put_futex_key(fshared, &key2);
			put_futex_key(fshared, &key1);
			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit. We force this here where we are able to easily handle
		 * faults rather in the requeue loop below.
		 */
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
						 &key2, &pi_state, nr_requeue);

		/*
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it. If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
		 * reference to it.
		 */
		if (ret == 1) {
			WARN_ON(pi_state);
			drop_count++;
			task_count++;
			ret = get_futex_value_locked(&curval2, uaddr2);
			if (!ret)
				ret = lookup_pi_state(curval2, hb2, &key2,
						      &pi_state);
		}

		switch (ret) {
		case 0:
			break;
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
			put_futex_key(fshared, &key2);
			put_futex_key(fshared, &key1);
			ret = fault_in_user_writeable(uaddr2);
			if (!ret)
				goto retry;
			goto out;
		case -EAGAIN:
			/* The owner was exiting, try again. */
			double_unlock_hb(hb1, hb2);
			put_futex_key(fshared, &key2);
			put_futex_key(fshared, &key1);
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}

	head1 = &hb1->chain;
	plist_for_each_entry_safe(this, next, head1, list) {
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!match_futex(&this->key, &key1))
			continue;

		/*
		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
		 */
		if ((requeue_pi && !this->rt_waiter) ||
		    (!requeue_pi && this->rt_waiter)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Wake nr_wake waiters. For requeue_pi, if we acquired the
		 * lock, we already woke the top_waiter. If not, it will be
		 * woken by futex_unlock_pi().
		 */
		if (++task_count <= nr_wake && !requeue_pi) {
			wake_futex(this);
			continue;
		}

		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 */
		if (requeue_pi) {
			/* Prepare the waiter to take the rt_mutex. */
			atomic_inc(&pi_state->refcount);
			this->pi_state = pi_state;
			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
							this->task, 1);
			if (ret == 1) {
				/* We got the lock. */
				requeue_pi_wake_futex(this, &key2, hb2);
				drop_count++;
				continue;
			} else if (ret) {
				this->pi_state = NULL;
				free_pi_state(pi_state);
				goto out_unlock;
			}
		}
		requeue_futex(this, hb1, hb2, &key2);
		drop_count++;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);

	/*
	 * drop_futex_key_refs() must be called outside the spinlocks. During
	 * the requeue we moved futex_q's from the hash bucket at key1 to the
	 * one at key2 and updated their key pointer. We no longer need to
	 * hold the references to key1.
	 */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(fshared, &key2);
out_put_key1:
	put_futex_key(fshared, &key1);
out:
	if (pi_state != NULL)
		free_pi_state(pi_state);
	return ret ? ret : task_count;
}
/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	get_futex_key_refs(&q->key);
	hb = hash_futex(&q->key);
	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}
static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
	spin_unlock(&hb->lock);
	drop_futex_key_refs(&q->key);
}
/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me().  The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the
 * unqueue state is implicit in the state of the woken task (see
 * futex_wait_requeue_pi() for an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
	q->list.plist.lock = &hb->lock;
#endif
	plist_add(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}
/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Returns:
 *   1 - if the futex_q was still queued (and we removed it)
 *   0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	lock_ptr = q->lock_ptr;
	barrier();
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		WARN_ON(plist_node_empty(&q->list));
		plist_del(&q->list, &q->list.plist);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}
/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
{
	WARN_ON(plist_node_empty(&q->list));
	plist_del(&q->list, &q->list.plist);

	BUG_ON(!q->pi_state);
	free_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);
}
/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with hash bucket lock held and mm->sem held for non
 * private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *newowner, int fshared)
{
	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	struct futex_pi_state *pi_state = q->pi_state;
	struct task_struct *oldowner = pi_state->owner;
	u32 uval, curval, newval;
	int ret;

	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	/*
	 * We are here either because we stole the rtmutex from the
	 * pending owner or we are the pending owner which failed to
	 * get the rtmutex. We have to replace the pending owner TID
	 * in the user space variable. This must be atomic as we have
	 * to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for cow.
	 *
	 * Modifying pi_state _before_ the user space value would
	 * leave the pi_state in an inconsistent state when we fault
	 * here, because we need to drop the hash bucket lock to
	 * handle the fault. This might be observed in the PID check
	 * in lookup_pi_state.
	 */
retry:
	if (get_futex_value_locked(&uval, uaddr))
		goto handle_fault;

	while (1) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			goto handle_fault;
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		spin_lock_irq(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		spin_unlock_irq(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	spin_lock_irq(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	spin_unlock_irq(&newowner->pi_lock);
	return 0;

	/*
	 * To handle the page fault we need to drop the hash bucket
	 * lock here. That gives the other task (either the pending
	 * owner itself or the task which stole the rtmutex) the
	 * chance to try the fixup of the pi_state. So once we are
	 * back from handling the fault we need to check the pi_state
	 * after reacquiring the hash bucket lock and before trying to
	 * do another fixup. When the fixup has been done already we
	 * simply return.
	 */
handle_fault:
	spin_unlock(q->lock_ptr);

	ret = fault_in_user_writeable(uaddr);

	spin_lock(q->lock_ptr);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner)
		return 0;

	if (ret)
		return ret;

	goto retry;
}
/*
 * In case we must use restart_block to restart a futex_wait,
 * we encode in the 'flags' shared capability
 */
#define FLAGS_SHARED		0x01
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

static long futex_wait_restart(struct restart_block *restart);
/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @fshared:	whether the futex is shared (1) or not (0)
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Returns:
 *  1 - success, lock taken
 *  0 - success, lock not taken
 * <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q,
		       int locked)
{
	struct task_struct *owner;
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 */
		if (q->pi_state->owner != current)
			ret = fixup_pi_state_owner(uaddr, q, current, fshared);
		goto out;
	}

	/*
	 * Catch the rare case, where the lock was released when we were on the
	 * way back before we locked the hash bucket.
	 */
	if (q->pi_state->owner == current) {
		/*
		 * Try to get the rt_mutex now. This might fail as some other
		 * task acquired the rt_mutex after we removed ourself from the
		 * rt_mutex waiters list.
		 */
		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
			locked = 1;
			goto out;
		}

		/*
		 * pi_state is incorrect, some other task did a lock steal and
		 * we returned due to timeout or signal without taking the
		 * rt_mutex. Too late. We can access the rt_mutex_owner without
		 * locking, as the other task is now blocked on the hash bucket
		 * lock. Fix the state up.
		 */
		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
		ret = fixup_pi_state_owner(uaddr, q, owner, fshared);
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner, nor the pending owner, of the rt_mutex.
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);

out:
	return ret ? ret : locked;
}
/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout)
{
	/*
	 * The task state is guaranteed to be set before another task can
	 * wake it. set_current_state() is implemented using set_mb() and
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	queue_me(q, hb);

	/* Arm the timer */
	if (timeout) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	/*
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			schedule();
	}
	__set_current_state(TASK_RUNNING);
}
/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
 * @fshared:	whether the futex is shared (1) or not (0)
 * @q:		the associated futex_q
 * @hb:		storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
 * compare it with the expected value.  Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
 * Returns:
 *  0 - uaddr contains val and hb has been locked
 * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 */
static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
			    struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we queued after testing *uaddr, that would open
	 * a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * A consequence is that futex_wait() can return zero and absorb
	 * a wakeup when *uaddr != val on entry to the syscall.  This is
	 * rare, but normal.
	 */
retry:
	q->key = FUTEX_KEY_INIT;
	ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ);
	if (unlikely(ret != 0))
		return ret;

	*hb = queue_lock(q);

	ret = get_futex_value_locked(&uval, uaddr);

	if (ret) {
		queue_unlock(q, *hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out;

		put_futex_key(fshared, &q->key);
		goto retry;
	}

	if (uval != val) {
		queue_unlock(q, *hb);
		ret = -EWOULDBLOCK;
	}

out:
	if (ret)
		put_futex_key(fshared, &q->key);
	return ret;
}
static int futex_wait(u32 __user *uaddr, int fshared,
		      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
	struct futex_q q;
	int ret;

	if (!bitset)
		return -EINVAL;

	q.pi_state = NULL;
	q.bitset = bitset;
	q.rt_waiter = NULL;
	q.requeue_pi_key = NULL;

	if (abs_time) {
		to = &timeout;

		hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
				      CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

retry:
	/*
	 * Prepare to wait on uaddr. On success, holds hb lock and increments
	 * q.key refs.
	 */
	ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
	if (ret)
		goto out;

	/* queue_me and wait for wakeup, timeout, or a signal. */
	futex_wait_queue_me(hb, &q, to);

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	/* unqueue_me() drops q.key ref */
	if (!unqueue_me(&q))
		goto out;
	ret = -ETIMEDOUT;
	if (to && !to->task)
		goto out;

	/*
	 * We expect signal_pending(current), but we might be the
	 * victim of a spurious wakeup as well.
	 */
	if (!signal_pending(current))
		goto retry;

	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out;

	restart = &current_thread_info()->restart_block;
	restart->fn = futex_wait_restart;
	restart->futex.uaddr = (u32 *)uaddr;
	restart->futex.val = val;
	restart->futex.time = abs_time->tv64;
	restart->futex.bitset = bitset;
	restart->futex.flags = FLAGS_HAS_TIMEOUT;

	if (fshared)
		restart->futex.flags |= FLAGS_SHARED;
	if (clockrt)
		restart->futex.flags |= FLAGS_CLOCKRT;

	ret = -ERESTART_RESTARTBLOCK;

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
	int fshared = 0;
	ktime_t t, *tp = NULL;

	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t.tv64 = restart->futex.time;
		tp = &t;
	}
	restart->fn = do_no_restart_syscall;
	if (restart->futex.flags & FLAGS_SHARED)
		fshared = 1;
	return (long)futex_wait(uaddr, fshared, restart->futex.val, tp,
				restart->futex.bitset,
				restart->futex.flags & FLAGS_CLOCKRT);
}
/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block, it does PI, etc. (Due to
 * races the kernel might see a 0 value of the futex too.)
 */
static int futex_lock_pi(u32 __user *uaddr, int fshared,
			 int detect, ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct futex_hash_bucket *hb;
	struct futex_q q;
	int res, ret;

	if (refill_pi_state_cache())
		return -ENOMEM;

	if (time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires(&to->timer, *time);
	}

	q.pi_state = NULL;
	q.rt_waiter = NULL;
	q.requeue_pi_key = NULL;
retry:
	q.key = FUTEX_KEY_INIT;
	ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

	hb = queue_lock(&q);

	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
	if (unlikely(ret)) {
		switch (ret) {
		case 1:
			/* We got the lock. */
			ret = 0;
			goto out_unlock_put_key;
		case -EFAULT:
			goto uaddr_faulted;
		case -EAGAIN:
			/*
			 * Task is exiting and we just wait for the
			 * exit to complete.
			 */
			queue_unlock(&q, hb);
			put_futex_key(fshared, &q.key);
			cond_resched();
			goto retry;
		default:
			goto out_unlock_put_key;
		}
	}

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	queue_me(&q, hb);

	WARN_ON(!q.pi_state);
	/*
	 * Block on the PI mutex:
	 */
	if (!trylock)
		ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
	else {
		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
	}

	spin_lock(q.lock_ptr);
	/*
	 * Fixup the pi_state owner and possibly acquire the lock if we
	 * haven't already.
	 */
	res = fixup_owner(uaddr, fshared, &q, !ret);
	/*
	 * If fixup_owner() returned an error, propagate that. If it acquired
	 * the lock, clear our -ETIMEDOUT or -EINTR.
	 */
	if (res)
		ret = (res < 0) ? res : 0;

	/*
	 * If fixup_owner() faulted and was unable to handle the fault, unlock
	 * it and return the fault to userspace.
	 */
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
		rt_mutex_unlock(&q.pi_state->pi_mutex);

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);

	goto out_put_key;

out_unlock_put_key:
	queue_unlock(&q, hb);

out_put_key:
	put_futex_key(fshared, &q.key);
out:
	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret != -EINTR ? ret : -ERESTARTNOINTR;

uaddr_faulted:
	queue_unlock(&q, hb);

	ret = fault_in_user_writeable(uaddr);
	if (ret)
		goto out_put_key;

	put_futex_key(fshared, &q.key);
	goto retry;
}
/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr, int fshared)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	u32 uval;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
		return -EPERM;

	ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
	 * To avoid races, try to do the TID -> 0 atomic transition
	 * again. If it succeeds then we can return without waking
	 * anyone else up:
	 */
	if (!(uval & FUTEX_OWNER_DIED))
		uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);


	if (unlikely(uval == -EFAULT))
		goto pi_faulted;
	/*
	 * Rare case: we managed to release the lock atomically,
	 * no need to wake anyone else up:
	 */
	if (unlikely(uval == task_pid_vnr(current)))
		goto out_unlock;

	/*
	 * Ok, other tasks may need to be woken up - check waiters
	 * and do the wakeup if necessary:
	 */
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (!match_futex (&this->key, &key))
			continue;
		ret = wake_futex_pi(uaddr, uval, this);
		/*
		 * The atomic access to the futex value
		 * generated a pagefault, so retry the
		 * user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		goto out_unlock;
	}
	/*
	 * No waiters - kernel unlocks the futex:
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		ret = unlock_futex_pi(uaddr, uval);
		if (ret == -EFAULT)
			goto pi_faulted;
	}

out_unlock:
	spin_unlock(&hb->lock);
	put_futex_key(fshared, &key);

out:
	return ret;

pi_faulted:
	spin_unlock(&hb->lock);
	put_futex_key(fshared, &key);

	ret = fault_in_user_writeable(uaddr);
	if (!ret)
		goto retry;

	return ret;
}
/**
 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 * @hb:		the hash_bucket futex_q was originally enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @key2:	the futex_key of the requeue target futex
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Detect if the task was woken on the initial futex as opposed to the requeue
 * target futex.  If so, determine if it was a timeout or a signal that caused
 * the wakeup and return the appropriate error code to the caller.  Must be
 * called with the hb lock held.
 *
 * Returns:
 *  0 - no early wakeup detected
 * <0 - -ETIMEDOUT or -ERESTARTNOINTR
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q, union futex_key *key2,
				   struct hrtimer_sleeper *timeout)
{
	int ret = 0;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
	 * We only need to hold hb (and not hb2) to ensure atomicity as the
	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
	 * It can't be requeued from uaddr2 to something else since we don't
	 * support a PI aware source futex for requeue.
	 */
	if (!match_futex(&q->key, key2)) {
		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
		/*
		 * We were woken prior to requeue by a timeout or a signal.
		 * Unqueue the futex_q and determine which it was.
		 */
		plist_del(&q->list, &q->list.plist);

		/* Handle spurious wakeups gracefully */
		ret = -EWOULDBLOCK;
		if (timeout && !timeout->task)
			ret = -ETIMEDOUT;
		else if (signal_pending(current))
			ret = -ERESTARTNOINTR;
	}
	return ret;
}
/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
 * @uaddr:	the futex we initially wait on (non-pi)
 * @fshared:	whether the futexes are shared (1) or not (0).  They must be
 *		the same type, no requeueing from private to shared, etc.
 * @val:	the expected value of uaddr
 * @abs_time:	absolute timeout
 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
 * @clockrt:	whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0)
 * @uaddr2:	the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
 * uaddr2 which must be PI aware.  Normal wakeup will wake on uaddr2 and
 * complete the acquisition of the rt_mutex prior to returning to userspace.
 * This ensures the rt_mutex maintains an owner when it has waiters; without
 * one, the pi logic wouldn't know which task to boost/deboost, if there was a
 * need to.
 *
 * We call schedule in futex_wait_queue_me() when we enqueue and return there
 * via the following:
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
 *
 * If 3, cleanup and return -ERESTARTNOINTR.
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
 *
 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 *
 * Returns:
 *  0 - On success
 * <0 - On error
 */
static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
				 u32 val, ktime_t *abs_time, u32 bitset,
				 int clockrt, u32 __user *uaddr2)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct rt_mutex *pi_mutex = NULL;
	struct futex_hash_bucket *hb;
	union futex_key key2;
	struct futex_q q;
	int res, ret;

	if (!bitset)
		return -EINVAL;

	if (abs_time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
				      CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
	debug_rt_mutex_init_waiter(&rt_waiter);
	rt_waiter.task = NULL;

	key2 = FUTEX_KEY_INIT;
	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

	q.pi_state = NULL;
	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

	/*
	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
	 * count.
	 */
	ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
	if (ret)
		goto out_key2;

	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
	futex_wait_queue_me(hb, &q, to);

	spin_lock(&hb->lock);
	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
	spin_unlock(&hb->lock);
	if (ret)
		goto out_put_keys;

	/*
	 * In order for us to be here, we know our q.key == key2, and since
	 * we took the hb->lock above, we also know that futex_requeue() has
	 * completed and we no longer have to concern ourselves with a wakeup
	 * race with the atomic proxy lock acquisition by the requeue code. The
	 * futex_requeue dropped our key1 reference and incremented our key2
	 * reference count.
	 */

	/* Check if the requeue code acquired the second futex for us. */
	if (!q.rt_waiter) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case.
		 */
		if (q.pi_state && (q.pi_state->owner != current)) {
			spin_lock(q.lock_ptr);
			ret = fixup_pi_state_owner(uaddr2, &q, current,
						   fshared);
			spin_unlock(q.lock_ptr);
		}
	} else {
		/*
		 * We have been woken up by futex_unlock_pi(), a timeout, or a
		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
		 * the pi_state.
		 */
		WARN_ON(!q.pi_state);
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
		debug_rt_mutex_free_waiter(&rt_waiter);

		spin_lock(q.lock_ptr);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
		res = fixup_owner(uaddr2, fshared, &q, !ret);
		/*
		 * If fixup_owner() returned an error, propagate that.  If it
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		/* Unqueue and drop the lock. */
		unqueue_me_pi(&q);
	}

	/*
	 * If fixup_pi_state_owner() faulted and was unable to handle the
	 * fault, unlock the rt_mutex and return the fault to userspace.
	 */
	if (ret == -EFAULT) {
		if (rt_mutex_owner(pi_mutex) == current)
			rt_mutex_unlock(pi_mutex);
	} else if (ret == -EINTR) {
		/*
		 * We've already been requeued, but cannot restart by calling
		 * futex_lock_pi() directly. We could restart this syscall, but
		 * it would detect that the user space "val" changed and return
		 * -EWOULDBLOCK.  Save the overhead of the restart and return
		 * -EWOULDBLOCK directly.
		 */
		ret = -EWOULDBLOCK;
	}

out_put_keys:
	put_futex_key(fshared, &q.key);
out_key2:
	put_futex_key(fshared, &key2);

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */
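
/*
 * Illustrative userspace-side sketch (not part of this file, and only a
 * rough approximation of what a C library does): each thread registers
 * one 'struct robust_list_head' via sys_set_robust_list(), and every
 * robust lock embeds a 'struct robust_list' node which the lock/unlock
 * fast paths link into and unlink from that list, with 'list_op_pending'
 * pointing at the node during the window in which the lock is already
 * held but not yet on the list. The names 'my_robust_mutex', 'futex'
 * and 'list' below are hypothetical, used purely for illustration:
 *
 *	struct my_robust_mutex {
 *		uint32_t futex;			// 32-bit futex word
 *		struct robust_list list;	// node linked into the head
 *	};
 *
 *	static __thread struct robust_list_head head;
 *
 *	// per-thread setup, e.g. at thread start:
 *	head.list.next	     = &head.list;	// empty, circular list
 *	head.futex_offset    = offsetof(struct my_robust_mutex, futex) -
 *			       offsetof(struct my_robust_mutex, list);
 *	head.list_op_pending = NULL;
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 */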
/**
 * sys_set_robust_list() - Set the robust-futex list head of a task
 * @head:	pointer to the list-head
 * @len:	length of the list-head, as userspace expects
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}
/**
 * sys_get_robust_list() - Get the robust-futex list head of a task
 * @pid:	pid of the process [zero for current task]
 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
 * @len_ptr:	pointer to a length field, the kernel fills in the header size
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	long ret;
	const struct cred *cred = current_cred(), *pcred;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	if (!pid)
		head = current->robust_list;
	else {
		struct task_struct *p;

		ret = -ESRCH;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
		ret = -EPERM;
		pcred = __task_cred(p);
		if (cred->euid != pcred->euid &&
		    cred->euid != pcred->uid &&
		    !capable(CAP_SYS_PTRACE))
			goto err_unlock;
		head = p->robust_list;
		rcu_read_unlock();
	}

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();
	return ret;
}
/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
	u32 uval, nval, mval;

retry:
	if (get_user(uval, uaddr))
		return -1;

	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest. Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any). (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.) The rest of the cleanup is done in
		 * userspace.
		 */
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);

		if (nval == -EFAULT)
			return -1;

		/* The value changed under us - retry with the new one: */
		if (nval != uval)
			goto retry;

		/*
		 * Wake robust non-PI futexes here. The wakeup of
		 * PI futexes happens in exit_pi_state():
		 */
		if (!pi && (uval & FUTEX_WAITERS))
			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
	}
	return 0;
}
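
/*
 * For reference, the layout of the 32-bit futex value manipulated above
 * (the flag and mask constants come from <linux/futex.h>): the owner's
 * TID sits in the bits covered by FUTEX_TID_MASK, and the remaining high
 * bits carry the FUTEX_WAITERS and FUTEX_OWNER_DIED flags. A minimal
 * sketch of the decoding that handle_futex_death() relies on:
 *
 *	u32 uval;				// fetched from *uaddr
 *	pid_t owner_tid = uval & FUTEX_TID_MASK;
 *	int has_waiters = !!(uval & FUTEX_WAITERS);
 *	int owner_died  = !!(uval & FUTEX_OWNER_DIED);
 */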
/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}
/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
	unsigned long futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void __user *)entry + futex_offset,
					       curr, pi))
				return;
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending)
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip);
}
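
/*
 * Note on the pointer arithmetic above: each list entry is a
 * 'struct robust_list' embedded in a userspace lock object, and
 * head->futex_offset is the signed distance from that node to the
 * lock's 32-bit futex word, so the word's address is recovered as:
 *
 *	u32 __user *uaddr = (void __user *)entry + futex_offset;
 *
 * which is exactly the expression handed to handle_futex_death().
 */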
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
	      u32 __user *uaddr2, u32 val2, u32 val3)
{
	int clockrt, ret = -ENOSYS;
	int cmd = op & FUTEX_CMD_MASK;
	int fshared = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		fshared = 1;

	clockrt = op & FUTEX_CLOCK_REALTIME;
	if (clockrt && cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
		return -ENOSYS;

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through to the bitset variant */
	case FUTEX_WAIT_BITSET:
		ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
		break;
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through to the bitset variant */
	case FUTEX_WAKE_BITSET:
		ret = futex_wake(uaddr, fshared, val, val3);
		break;
	case FUTEX_REQUEUE:
		ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL, 0);
		break;
	case FUTEX_CMP_REQUEUE:
		ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
				    0);
		break;
	case FUTEX_WAKE_OP:
		ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
		break;
	case FUTEX_LOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
		break;
	case FUTEX_UNLOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_unlock_pi(uaddr, fshared);
		break;
	case FUTEX_TRYLOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
		break;
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		ret = futex_wait_requeue_pi(uaddr, fshared, val, timeout, val3,
					    clockrt, uaddr2);
		break;
	case FUTEX_CMP_REQUEUE_PI:
		ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
				    1);
		break;
	default:
		ret = -ENOSYS;
	}
	return ret;
}
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
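
/*
 * Illustrative userspace sketch (not part of this file): the raw futex
 * syscall is normally reached through the syscall() wrapper. A bare
 * wait/wake pair on a 32-bit word might look like the following, where
 * 'futex_word' and 'expected' are placeholder names:
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// Sleep only while the word still holds the expected value.
 *	syscall(SYS_futex, &futex_word, FUTEX_WAIT, expected, NULL, NULL, 0);
 *
 *	// Wake at most one waiter blocked on the same word.
 *	syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
 */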
static int __init futex_init(void)
{
	u32 curval;
	int i;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on functional
	 * implementation, the non functional ones will return
	 * -ENOSYS.
	 */
	curval = cmpxchg_futex_value_locked(NULL, 0, 0);
	if (curval == -EFAULT)
		futex_cmpxchg_enabled = 1;

	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
		plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
__initcall(futex_init);