/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>

#include "hwspinlock_internal.h"

#define HWSPINLOCK_UNUSED	(0)	/* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides an easy-to-use API which keeps the hwspinlock core code
 * simple and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the IDs of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this spinlock,
 * as the radix-tree API requires that users provide all synchronisation.
 */
static DEFINE_SPINLOCK(hwspinlock_tree_lock);

/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved
 *         (if requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption (and possibly
 * interrupts) is disabled, so the caller must not sleep, and is advised to
 * release the hwspinlock as soon as possible. This is required in order to
 * minimize remote cores polling on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether their previous state should be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	int ret;

	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * This spin_lock{_irq, _irqsave} serves three purposes:
	 *
	 * 1. Disable preemption, in order to minimize the period of time
	 *    in which the hwspinlock is taken. This is important in order
	 *    to minimize the possible polling on the hardware interconnect
	 *    by a remote user of this lock.
	 * 2. Make the hwspinlock SMP-safe (so we can take it from
	 *    additional contexts on the local host).
	 * 3. Ensure that in_atomic/might_sleep checks catch potential
	 *    problems with hwspinlock usage (e.g. scheduler checks like
	 *    'scheduling while atomic' etc.)
	 */
	if (mode == HWLOCK_IRQSTATE)
		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
	else if (mode == HWLOCK_IRQ)
		ret = spin_trylock_irq(&hwlock->lock);
	else
		ret = spin_trylock(&hwlock->lock);

	/* is lock already taken by another context on the local cpu ? */
	if (!ret)
		return -EBUSY;

	/* try to take the hwspinlock device */
	ret = hwlock->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
		if (mode == HWLOCK_IRQSTATE)
			spin_unlock_irqrestore(&hwlock->lock, *flags);
		else if (mode == HWLOCK_IRQ)
			spin_unlock_irq(&hwlock->lock);
		else
			spin_unlock(&hwlock->lock);

		return -EBUSY;
	}

	/*
	 * We can be sure the other core's memory operations
	 * are observable to us only _after_ we successfully take
	 * the hwspinlock, and we must make sure that subsequent memory
	 * operations (both reads and writes) will not be reordered before
	 * we actually took the hwspinlock.
	 *
	 * Note: the implicit memory barrier of the spinlock above is too
	 * early, so we need this additional explicit memory barrier.
	 */
	mb();

	return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
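
/*
 * A minimal usage sketch (illustrative only), assuming the hwspin_trylock()
 * and hwspin_unlock() wrappers declared in <linux/hwspinlock.h>; the
 * shared_counter variable is hypothetical:
 *
 *	struct hwspinlock *hwlock = hwspin_lock_request();
 *
 *	if (hwlock && hwspin_trylock(hwlock) == 0) {
 *		shared_counter++;	// keep the critical section short
 *		hwspin_unlock(hwlock);
 *	}
 */
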
/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved
 *         (if requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * (and possibly local interrupts, too), so the caller must not sleep,
 * and is advised to release the hwspinlock as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether their previous state should be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
					int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire;

	expire = msecs_to_jiffies(to) + jiffies;

	for (;;) {
		/* Try to take the hwspinlock */
		ret = __hwspin_trylock(hwlock, mode, flags);
		if (ret != -EBUSY)
			break;

		/*
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (time_is_before_eq_jiffies(expire))
			return -ETIMEDOUT;

		/*
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
		if (hwlock->ops->relax)
			hwlock->ops->relax(hwlock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
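
/*
 * A minimal usage sketch (illustrative only), using the
 * hwspin_lock_timeout_irq() wrapper from <linux/hwspinlock.h>: busy-wait
 * for up to 100 msecs with local interrupts disabled. The
 * update_shared_state() helper is hypothetical.
 *
 *	int ret = hwspin_lock_timeout_irq(hwlock, 100);
 *	if (ret)
 *		return ret;	// most likely -ETIMEDOUT
 *
 *	update_shared_state();	// no sleeping while the lock is held
 *	hwspin_unlock_irq(hwlock);
 */
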
/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether their previous state should be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 *
	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it takes
	 * the hwspinlock, and by then we want to be sure our memory
	 * operations are already observable.
	 */
	mb();

	hwlock->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	if (mode == HWLOCK_IRQSTATE)
		spin_unlock_irqrestore(&hwlock->lock, *flags);
	else if (mode == HWLOCK_IRQ)
		spin_unlock_irq(&hwlock->lock);
	else
		spin_unlock(&hwlock->lock);
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
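
/*
 * Sketch of the HWLOCK_IRQSTATE mode (illustrative only): the
 * hwspin_lock_timeout_irqsave()/hwspin_unlock_irqrestore() wrappers from
 * <linux/hwspinlock.h> save and restore the caller's interrupt state,
 * mirroring spin_lock_irqsave()/spin_unlock_irqrestore():
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_lock_timeout_irqsave(hwlock, 50, &flags);
 *	if (ret)
 *		return ret;
 *
 *	// ... short critical section ...
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */
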
/**
 * hwspin_lock_register() - register a new hw spinlock
 * @hwlock: hwspinlock to register.
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock instance.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context.
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock *hwlock)
{
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock || !hwlock->ops ||
		!hwlock->ops->trylock || !hwlock->ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	spin_lock_init(&hwlock->lock);

	spin_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
	if (ret)
		goto out;

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
							HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	spin_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
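
/*
 * Illustrative registration sketch for a platform-specific implementation
 * (struct layout per "hwspinlock_internal.h"; the my_* names and pdev are
 * hypothetical):
 *
 *	static const struct hwspinlock_ops my_ops = {
 *		.trylock	= my_trylock,	// mandatory
 *		.unlock		= my_unlock,	// mandatory
 *		.relax		= my_relax,	// optional
 *	};
 *
 *	hwlock->ops = &my_ops;
 *	hwlock->id = i;
 *	hwlock->dev = &pdev->dev;
 *	hwlock->owner = THIS_MODULE;
 *	ret = hwspin_lock_register(hwlock);
 */
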
/**
 * hwspin_lock_unregister() - unregister an hw spinlock
 * @id: index of the specific hwspinlock to unregister
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context.
 *
 * Returns the address of hwspinlock @id on success, or NULL on failure
 */
struct hwspinlock *hwspin_lock_unregister(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	spin_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_err("failed to delete hwspinlock %d\n", id);
		goto out;
	}

out:
	spin_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
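
/*
 * Illustrative teardown sketch for a platform driver's remove path (the
 * loop bounds and the kfree() ownership are assumptions about the caller):
 *
 *	for (i = 0; i < num_locks; i++) {
 *		struct hwspinlock *hwlock = hwspin_lock_unregister(i);
 *
 *		if (!hwlock)	// still in use; treat as an error
 *			return -EBUSY;
 *		kfree(hwlock);
 *	}
 */
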
/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
	if (!try_module_get(hwlock->owner)) {
		dev_err(hwlock->dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(hwlock->dev);
	if (ret < 0) {
		dev_err(hwlock->dev, "%s: can't power on device\n", __func__);
		return ret;
	}

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
							HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
	WARN_ON(tmp != hwlock);

	return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	return hwlock->id;
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context (simply because there is no use case for
 * that yet).
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
	struct hwspinlock *hwlock;
	int ret;

	spin_lock(&hwspinlock_tree_lock);

	/* look for an unused lock */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
						0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("a free hwspinlock is not available\n");
		hwlock = NULL;
		goto out;
	}

	/* sanity check that should never fail */
	WARN_ON(ret > 1);

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	spin_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
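
/*
 * Illustrative lifecycle sketch (the send_id_to_remote() IPC helper is
 * hypothetical): request a lock, publish its id to the remote core, and
 * free it when synchronization is no longer needed.
 *
 *	struct hwspinlock *hwlock = hwspin_lock_request();
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	send_id_to_remote(hwspin_lock_get_id(hwlock));
 *
 *	// ... both cores now synchronize via hwspin_lock_timeout() ...
 *
 *	hwspin_lock_free(hwlock);
 */
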
/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context (simply because there is no use case for
 * that yet).
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	spin_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock->id != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	spin_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
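
/*
 * Illustrative sketch of early board code reserving a predefined lock id
 * (the id value 0 is an assumption):
 *
 *	struct hwspinlock *hwlock = hwspin_lock_request_specific(0);
 *	if (!hwlock)
 *		pr_err("hwspinlock 0 is taken or absent\n");
 */
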
/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context (simply because there is no use case for
 * that yet).
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	spin_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
							HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	ret = pm_runtime_put(hwlock->dev);
	if (ret < 0)
		goto out;

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
							HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

	module_put(hwlock->owner);

out:
	spin_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");