/* Semaphore definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
11 #ifndef _ASM_SEMAPHORE_H
12 #define _ASM_SEMAPHORE_H
16 #include <linux/linkage.h>
17 #include <linux/wait.h>
18 #include <linux/spinlock.h>
19 #include <linux/rwsem.h>
21 #define SEMAPHORE_DEBUG 0
24 * the semaphore definition
25 * - if count is >0 then there are tokens available on the semaphore for down
27 * - if count is <=0 then there are no spare tokens, and anyone that wants one
29 * - if wait_list is not empty, then there are processes waiting for the
33 atomic_t count
; /* it's not really atomic, it's
34 * just that certain modules
35 * expect to be able to access
38 struct list_head wait_list
;
/* optional static-initialiser tail that seeds the __magic cookie; expands to
 * nothing when debugging is disabled */
#if SEMAPHORE_DEBUG
# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif
/* static initialiser for a semaphore starting with init_count tokens */
#define __SEMAPHORE_INITIALIZER(name, init_count)			\
{									\
	.count		= ATOMIC_INIT(init_count),			\
	.wait_lock	= __SPIN_LOCK_UNLOCKED((name).wait_lock),	\
	.wait_list	= LIST_HEAD_INIT((name).wait_list)		\
	__SEM_DEBUG_INIT(name)						\
}
/* declare and statically initialise a semaphore with the given token count */
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

/* a mutex is just a semaphore with a single token; the LOCKED variant starts
 * with the token already taken */
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)
65 static inline void sema_init(struct semaphore
*sem
, int val
)
67 *sem
= (struct semaphore
) __SEMAPHORE_INITIALIZER(*sem
, val
);
/*
 * initialise a semaphore as a mutex (one token available)
 * - mirrors DECLARE_MUTEX(), which also uses a count of 1
 */
static inline void init_MUTEX(struct semaphore *sem)
{
	sema_init(sem, 1);
}
/*
 * initialise a semaphore as an already-held mutex (no tokens available)
 * - mirrors DECLARE_MUTEX_LOCKED(), which also uses a count of 0
 */
static inline void init_MUTEX_LOCKED(struct semaphore *sem)
{
	sema_init(sem, 0);
}
/*
 * out-of-line contention slow paths
 * - __down/__down_interruptible are entered with wait_lock held and
 *   interrupts disabled (flags saved by spin_lock_irqsave in the callers);
 *   NOTE(review): presumably they drop the lock/restore flags before
 *   sleeping - confirm against the out-of-line implementation
 * - __up is called with wait_lock held to hand a token to a waiter
 */
extern void __down(struct semaphore *sem, unsigned long flags);
extern int __down_interruptible(struct semaphore *sem, unsigned long flags);
extern void __up(struct semaphore *sem);
84 static inline void down(struct semaphore
*sem
)
90 CHECK_MAGIC(sem
->__magic
);
93 spin_lock_irqsave(&sem
->wait_lock
, flags
);
94 count
= atomic_read(&sem
->count
);
95 if (likely(count
> 0)) {
96 atomic_set(&sem
->count
, count
- 1);
97 spin_unlock_irqrestore(&sem
->wait_lock
, flags
);
103 static inline int down_interruptible(struct semaphore
*sem
)
109 CHECK_MAGIC(sem
->__magic
);
112 spin_lock_irqsave(&sem
->wait_lock
, flags
);
113 count
= atomic_read(&sem
->count
);
114 if (likely(count
> 0)) {
115 atomic_set(&sem
->count
, count
- 1);
116 spin_unlock_irqrestore(&sem
->wait_lock
, flags
);
118 ret
= __down_interruptible(sem
, flags
);
124 * non-blockingly attempt to down() a semaphore.
125 * - returns zero if we acquired it
127 static inline int down_trylock(struct semaphore
*sem
)
130 int count
, success
= 0;
133 CHECK_MAGIC(sem
->__magic
);
136 spin_lock_irqsave(&sem
->wait_lock
, flags
);
137 count
= atomic_read(&sem
->count
);
138 if (likely(count
> 0)) {
139 atomic_set(&sem
->count
, count
- 1);
142 spin_unlock_irqrestore(&sem
->wait_lock
, flags
);
146 static inline void up(struct semaphore
*sem
)
151 CHECK_MAGIC(sem
->__magic
);
154 spin_lock_irqsave(&sem
->wait_lock
, flags
);
155 if (!list_empty(&sem
->wait_list
))
158 atomic_set(&sem
->count
, atomic_read(&sem
->count
) + 1);
159 spin_unlock_irqrestore(&sem
->wait_lock
, flags
);
162 static inline int sem_getcount(struct semaphore
*sem
)
164 return atomic_read(&sem
->count
);
167 #endif /* __ASSEMBLY__ */