/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/dma-fence.h>

#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>

EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
/*
 * fence context counter: each execution context should have its own
 * fence context, this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
/**
 * dma_fence_context_alloc - allocate an array of fence contexts
 * @num: [in] amount of contexts to allocate
 *
 * This function will return the first of the @num consecutive fence context
 * numbers allocated. The fence context is used for setting fence->context
 * to a unique number, by passing the context to dma_fence_init().
 */
u64 dma_fence_context_alloc(unsigned num)
{
	BUG_ON(!num);
	return atomic64_add_return(num, &dma_fence_context_counter) - num;
}
EXPORT_SYMBOL(dma_fence_context_alloc);
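/*
 * Example: a driver with multiple independent engines would typically
 * allocate one context per engine at init time. This is an illustrative
 * sketch, not part of this file: struct my_device, MY_NUM_RINGS and the
 * ring array are hypothetical driver state.
 *
 *	static void my_device_init_fence_contexts(struct my_device *dev)
 *	{
 *		unsigned i;
 *		u64 first = dma_fence_context_alloc(MY_NUM_RINGS);
 *
 *		for (i = 0; i < MY_NUM_RINGS; ++i)
 *			dev->ring[i].fence_context = first + i;
 *	}
 */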
/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 *
 * Unlike dma_fence_signal(), this function must be called with fence->lock
 * held.
 */
int dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *cur, *tmp;
	int ret = 0;

	if (WARN_ON(!fence))
		return -EINVAL;

	lockdep_assert_held(fence->lock);

	if (!ktime_to_ns(fence->timestamp)) {
		fence->timestamp = ktime_get();
		smp_mb__before_atomic();
	}

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		ret = -EINVAL;

		/*
		 * we might have raced with the unlocked dma_fence_signal(),
		 * still run through all callbacks
		 */
	} else
		trace_dma_fence_signaled(fence);

	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
		list_del_init(&cur->node);
		cur->func(fence, cur);
	}
	return ret;
}
EXPORT_SYMBOL(dma_fence_signal_locked);
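/*
 * Example: signaling fences from a path that already holds fence->lock,
 * e.g. completing every fence up to the last sequence number the hardware
 * reported. Illustrative sketch only: struct my_fence (embedding a
 * dma_fence), the ring's pending list and fence_lock (the same spinlock
 * that was passed to dma_fence_init()) are hypothetical driver state.
 *
 *	spin_lock_irqsave(&ring->fence_lock, flags);
 *	list_for_each_entry_safe(f, tmp, &ring->pending, link) {
 *		if (f->base.seqno > completed_seqno)
 *			break;
 *		list_del(&f->link);
 *		dma_fence_signal_locked(&f->base);
 *	}
 *	spin_unlock_irqrestore(&ring->fence_lock, flags);
 */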
/**
 * dma_fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 */
int dma_fence_signal(struct dma_fence *fence)
{
	unsigned long flags;

	if (!fence)
		return -EINVAL;

	if (!ktime_to_ns(fence->timestamp)) {
		fence->timestamp = ktime_get();
		smp_mb__before_atomic();
	}

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	trace_dma_fence_signaled(fence);

	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
		struct dma_fence_cb *cur, *tmp;

		spin_lock_irqsave(fence->lock, flags);
		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(fence, cur);
		}
		spin_unlock_irqrestore(fence->lock, flags);
	}
	return 0;
}
EXPORT_SYMBOL(dma_fence_signal);
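/*
 * Example: signaling a fence from an interrupt handler once the hardware
 * reports completion. Illustrative sketch only: struct my_device and its
 * active_fence pointer are hypothetical driver state.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_device *dev = data;
 *		struct dma_fence *fence = READ_ONCE(dev->active_fence);
 *
 *		if (fence)
 *			dma_fence_signal(fence);
 *		return IRQ_HANDLED;
 *	}
 */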
/**
 * dma_fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: [in] the fence to wait on
 * @intr: [in] if true, do an interruptible wait
 * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 */
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
	signed long ret;

	if (WARN_ON(timeout < 0))
		return -EINVAL;

	trace_dma_fence_wait_start(fence);
	ret = fence->ops->wait(fence, intr, timeout);
	trace_dma_fence_wait_end(fence);
	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);
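/*
 * Example: a bounded, interruptible wait using the return convention
 * documented above. Illustrative sketch; the fence is assumed to be
 * referenced by the caller, and -ETIMEDOUT is just one way a caller
 * might map a timeout.
 *
 *	signed long ret;
 *
 *	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *	if (ret < 0)
 *		return ret;
 *
 * A positive return means the fence signaled with ret jiffies of the
 * timeout remaining.
 */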
/**
 * dma_fence_release - default release function for fences
 * @kref: fence refcount
 *
 * Do not call directly; this runs when the last reference is dropped via
 * dma_fence_put(). It calls the implementation's release op if there is
 * one, and frees the fence itself otherwise.
 */
void dma_fence_release(struct kref *kref)
{
	struct dma_fence *fence =
		container_of(kref, struct dma_fence, refcount);

	trace_dma_fence_destroy(fence);

	BUG_ON(!list_empty(&fence->cb_list));

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);

/**
 * dma_fence_free - default release function for &dma_fence
 * @fence: fence to release
 *
 * Frees the fence after an RCU grace period, so that concurrent RCU
 * readers can still dereference it safely.
 */
void dma_fence_free(struct dma_fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(dma_fence_free);
/**
 * dma_fence_enable_sw_signaling - enable signaling on fence
 * @fence: [in] the fence to enable
 *
 * This will request that software signaling be enabled, to make the fence
 * complete as soon as possible.
 */
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	unsigned long flags;

	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			      &fence->flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		trace_dma_fence_enable_signal(fence);

		spin_lock_irqsave(fence->lock, flags);

		if (!fence->ops->enable_signaling(fence))
			dma_fence_signal_locked(fence);

		spin_unlock_irqrestore(fence->lock, flags);
	}
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
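/*
 * Example: kicking the signaling path early so the fence can complete
 * without anyone blocking in dma_fence_wait(). Illustrative sketch; the
 * fence is assumed to be referenced by the caller.
 *
 *	dma_fence_enable_sw_signaling(fence);
 *
 * From this point on, the fence will signal via its interrupt path as soon
 * as the hardware completes, and dma_fence_is_signaled() can be checked
 * opportunistically without any task sleeping on the fence.
 */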
/**
 * dma_fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: [in] the fence to wait on
 * @cb: [in] the callback to register
 * @func: [in] the function to call
 *
 * cb will be initialized by dma_fence_add_callback, no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * Note that the callback can be called from an atomic context. If
 * fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 *
 * Add a software callback to the fence. The same refcount restrictions
 * apply as for dma_fence_wait(); however, the caller doesn't need to
 * keep a refcount to fence afterwards: when software access is enabled,
 * the creator of the fence is required to keep the fence alive until
 * after it signals with dma_fence_signal(). The callback itself can be
 * called from irq context.
 */
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
			   dma_fence_func_t func)
{
	unsigned long flags;
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		ret = -ENOENT;
	else if (!was_set) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else
		INIT_LIST_HEAD(&cb->node);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_add_callback);
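/*
 * Example: registering a callback instead of blocking. The callback may run
 * in irq context, so it should only do minimal work, like kicking a worker.
 * Illustrative sketch only: struct my_job, its cb member and finish_work
 * are hypothetical driver state.
 *
 *	static void my_fence_cb(struct dma_fence *fence,
 *				struct dma_fence_cb *cb)
 *	{
 *		struct my_job *job = container_of(cb, struct my_job, cb);
 *
 *		schedule_work(&job->finish_work);
 *	}
 *
 * and at submission time, handling the already-signaled case directly:
 *
 *	if (dma_fence_add_callback(fence, &job->cb, my_fence_cb) == -ENOENT)
 *		schedule_work(&job->finish_work);
 */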
/**
 * dma_fence_remove_callback - remove a callback from the signaling list
 * @fence: [in] the fence to wait on
 * @cb: [in] the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(fence->lock, flags);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_remove_callback);
struct default_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
		container_of(cb, struct default_wait_cb, base);

	wake_up_state(wait->task, TASK_NORMAL);
}
/**
 * dma_fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: [in] the fence to wait on
 * @intr: [in] if true, do an interruptible wait
 * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. If timeout is zero the value one is
 * returned if the fence is already signaled for consistency with other
 * functions taking a jiffies timeout.
 */
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	signed long ret = timeout ? timeout : 1;
	bool was_set;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return ret;

	spin_lock_irqsave(fence->lock, flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (!was_set) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			goto out;
		}
	}

	cb.base.func = dma_fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);
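/*
 * Example: a fence implementation with an interrupt-driven signaling path
 * can usually plug dma_fence_default_wait straight into its ops.
 * Illustrative sketch only: the my_* callbacks are hypothetical driver
 * functions implementing the remaining mandatory ops.
 *
 *	static const struct dma_fence_ops my_fence_ops = {
 *		.get_driver_name = my_get_driver_name,
 *		.get_timeline_name = my_get_timeline_name,
 *		.enable_signaling = my_enable_signaling,
 *		.wait = dma_fence_default_wait,
 *	};
 */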
static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
			    uint32_t *idx)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			if (idx)
				*idx = i;
			return true;
		}
	}
	return false;
}
/**
 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences: [in] array of fences to wait on
 * @count: [in] number of fences to wait on
 * @intr: [in] if true, do an interruptible wait
 * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 * @idx: [out] the first signaled fence index, meaningful only on
 * positive return
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronously waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 */
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
			   bool intr, signed long timeout, uint32_t *idx)
{
	struct default_wait_cb *cb;
	signed long ret = timeout;
	unsigned i;

	if (WARN_ON(!fences || !count || timeout < 0))
		return -EINVAL;

	if (timeout == 0) {
		for (i = 0; i < count; ++i)
			if (dma_fence_is_signaled(fences[i])) {
				if (idx)
					*idx = i;
				return 1;
			}

		return 0;
	}

	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto err_free_cb;
	}

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		if (fence->ops->wait != dma_fence_default_wait) {
			ret = -EINVAL;
			goto fence_rm_cb;
		}

		cb[i].task = current;
		if (dma_fence_add_callback(fence, &cb[i].base,
					   dma_fence_default_wait_cb)) {
			/* This fence is already signaled */
			if (idx)
				*idx = i;
			goto fence_rm_cb;
		}
	}

	while (ret > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		if (dma_fence_test_signaled_any(fences, count, idx))
			break;

		ret = schedule_timeout(ret);

		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
	kfree(cb);

	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);
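/*
 * Example: waiting for whichever of two engines finishes first. Illustrative
 * sketch; render_done and copy_done are hypothetical fences whose ops use
 * dma_fence_default_wait, both referenced by the caller.
 *
 *	struct dma_fence *fences[] = { render_done, copy_done };
 *	uint32_t first;
 *	signed long ret;
 *
 *	ret = dma_fence_wait_any_timeout(fences, 2, true,
 *					 msecs_to_jiffies(500), &first);
 *
 * On a positive return, fences[first] is the fence that signaled first.
 */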
/**
 * dma_fence_init - Initialize a custom fence.
 * @fence: [in] the fence to initialize
 * @ops: [in] the dma_fence_ops for operations on this fence
 * @lock: [in] the irqsafe spinlock to use for locking this fence
 * @context: [in] the execution context this fence is run on
 * @seqno: [in] a linearly increasing sequence number for this context
 *
 * Initializes an allocated fence. The caller doesn't have to keep its
 * refcount after committing with this fence, but will need to hold a
 * refcount again if dma_fence_ops.enable_signaling gets called. This can
 * be used for implementing other types of fence.
 *
 * context and seqno are used for easy comparison between fences, making it
 * possible to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
	       spinlock_t *lock, u64 context, unsigned seqno)
{
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
	       !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	fence->ops = ops;
	INIT_LIST_HEAD(&fence->cb_list);
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0UL;

	trace_dma_fence_init(fence);
}
EXPORT_SYMBOL(dma_fence_init);
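/*
 * Example: a minimal driver fence tying the pieces above together, built on
 * the my_fence_ops sketch near dma_fence_default_wait(). Illustrative
 * sketch only: struct my_ring, its fence_lock, fence_context (from
 * dma_fence_context_alloc()) and seqno are hypothetical driver state.
 *
 *	struct my_fence {
 *		struct dma_fence base;
 *		struct list_head link;
 *	};
 *
 *	static struct my_fence *my_fence_create(struct my_ring *ring)
 *	{
 *		struct my_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *		if (!f)
 *			return NULL;
 *
 *		dma_fence_init(&f->base, &my_fence_ops, &ring->fence_lock,
 *			       ring->fence_context, ++ring->seqno);
 *		return f;
 *	}
 */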