arch/ia64/include/asm/mutex.h
/*
 * ia64 implementation of the mutex fastpath.
 *
 * Copyright (C) 2006 Ken Chen <kenneth.w.chen@intel.com>
 *
 */

#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 *                         from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function MUST leave the value lower than
 * 1 even when the "1" assertion wasn't true.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
		fail_fn(count);
}

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
		return fail_fn(count);
	return 0;
}

/**
 * __mutex_fastpath_unlock - try to promote the count from 0 to 1
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value to
 * 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value lower than 1, then the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1; it needs
 * to return 0 otherwise.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int ret = ia64_fetchadd4_rel(count, 1);
	if (unlikely(ret < 0))
		fail_fn(count);
}

#define __mutex_slowpath_needs_to_unlock()	1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not leave
 * it at 0 on failure.
 *
 * If the architecture has no effective trylock variant, it should call the
 * <fail_fn> spinlock-based trylock variant unconditionally.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (cmpxchg_acq(count, 1, 0) == 1)
		return 1;

	return 0;
}

#endif
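
The helpers above only make sense together with the generic mutex slowpath that calls them. As an illustration of the count protocol they rely on (1 = unlocked, 0 = locked, < 0 = locked with possible waiters), here is a minimal userspace sketch using C11 <stdatomic.h> instead of the kernel's atomic_t and the ia64 fetchadd/cmpxchg intrinsics; the names demo_mutex, demo_lock, demo_unlock and demo_lock_slowpath are hypothetical and are not part of the kernel API.

#include <stdatomic.h>
#include <stdio.h>

struct demo_mutex {
	atomic_int count;	/* 1: unlocked, 0: locked, < 0: locked, maybe waiters */
};

static void demo_lock_slowpath(struct demo_mutex *m)
{
	/* A real slowpath would queue the caller and sleep; this one just spins. */
	while (atomic_exchange(&m->count, -1) != 1)
		;
}

static void demo_lock(struct demo_mutex *m)
{
	/* Fastpath, analogous to __mutex_fastpath_lock(): one atomic decrement. */
	if (atomic_fetch_add(&m->count, -1) != 1)
		demo_lock_slowpath(m);
}

static void demo_unlock(struct demo_mutex *m)
{
	/* Fastpath, analogous to __mutex_fastpath_unlock(): one atomic increment. */
	if (atomic_fetch_add(&m->count, 1) < 0) {
		/*
		 * The count was negative, so there may be waiters; a real
		 * slowpath would wake one up.  The demo just resets the
		 * count to "unlocked".
		 */
		atomic_store(&m->count, 1);
	}
}

int main(void)
{
	struct demo_mutex m = { .count = 1 };

	demo_lock(&m);
	printf("locked, count = %d\n", atomic_load(&m.count));
	demo_unlock(&m);
	printf("unlocked, count = %d\n", atomic_load(&m.count));
	return 0;
}

The spin loop in demo_lock_slowpath merely stands in for the real slowpath, which places the caller on a wait list and sleeps until the unlock path wakes it.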