/*
 * core routines for the asynchronous memory transfer/transform api
 *
 * Copyright © 2006, Intel Corporation.
 *
 *	Dan Williams <dan.j.williams@intel.com>
 *
 *	with architecture considerations by:
 *	Neil Brown <neilb@suse.de>
 *	Jeff Garzik <jeff@garzik.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/kernel.h>
#include <linux/async_tx.h>
#ifdef CONFIG_DMA_ENGINE
static enum dma_state_client
dma_channel_add_remove(struct dma_client *client,
	struct dma_chan *chan, enum dma_state state);

static struct dma_client async_tx_dma = {
	.event_callback = dma_channel_add_remove,
	/* .cap_mask == 0 defaults to all channels */
};
/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;
/**
 * chan_ref_percpu - tracks channel allocations per core/operation
 */
struct chan_ref_percpu {
	struct dma_chan_ref *ref;
};

static int channel_table_initialized;
static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END];
/**
 * async_tx_lock - protect modification of async_tx_master_list and serialize
 *	rebalance operations
 */
static spinlock_t async_tx_lock;

static struct list_head
async_tx_master_list = LIST_HEAD_INIT(async_tx_master_list);
/* async_tx_issue_pending_all - start all transactions on all channels */
void async_tx_issue_pending_all(void)
{
	struct dma_chan_ref *ref;

	rcu_read_lock();
	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
		ref->chan->device->device_issue_pending(ref->chan);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(async_tx_issue_pending_all);
/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	enum dma_status status;
	struct dma_async_tx_descriptor *iter;
	struct dma_async_tx_descriptor *parent;

	if (!tx)
		return DMA_SUCCESS;

	/* poll through the dependency chain, return when tx is complete */
	do {
		iter = tx;

		/* find the root of the unsubmitted dependency chain */
		while (iter->cookie == -EBUSY) {
			parent = iter->parent;
			if (parent && parent->cookie == -EBUSY)
				iter = parent;
			else
				break;
		}

		status = dma_sync_wait(iter->chan, iter->cookie);
	} while (status == DMA_IN_PROGRESS || (iter != tx));

	return status;
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
/* async_tx_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void
async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep_tx, *_dep_tx;
	struct dma_device *dev;
	struct dma_chan *chan;

	list_for_each_entry_safe(dep_tx, _dep_tx, &tx->depend_list,
		depend_node) {
		chan = dep_tx->chan;
		dev = chan->device;

		/* we can't depend on ourselves */
		BUG_ON(chan == tx->chan);

		list_del(&dep_tx->depend_node);
		tx->tx_submit(dep_tx);

		/* we need to poke the engine as client code does not
		 * know about dependency submission events
		 */
		dev->device_issue_pending(chan);
	}
}
EXPORT_SYMBOL_GPL(async_tx_run_dependencies);
static void
free_dma_chan_ref(struct rcu_head *rcu)
{
	struct dma_chan_ref *ref;

	ref = container_of(rcu, struct dma_chan_ref, rcu);
	kfree(ref);
}
static void
init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan)
{
	INIT_LIST_HEAD(&ref->node);
	INIT_RCU_HEAD(&ref->rcu);
	ref->chan = chan;
	atomic_set(&ref->count, 0);
}
/**
 * get_chan_ref_by_cap - returns the nth channel of the given capability
 *	defaults to returning the channel with the desired capability and the
 *	lowest reference count if the index cannot be satisfied
 * @cap: capability to match
 * @index: nth channel desired, passing -1 has the effect of forcing the
 *	default return value
 */
static struct dma_chan_ref *
get_chan_ref_by_cap(enum dma_transaction_type cap, int index)
{
	struct dma_chan_ref *ret_ref = NULL, *min_ref = NULL, *ref;

	rcu_read_lock();
	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
		if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
			if (!min_ref)
				min_ref = ref;
			else if (atomic_read(&ref->count) <
				atomic_read(&min_ref->count))
				min_ref = ref;

			if (index-- == 0) {
				ret_ref = ref;
				break;
			}
		}
	rcu_read_unlock();

	if (!ret_ref)
		ret_ref = min_ref;

	if (ret_ref)
		atomic_inc(&ret_ref->count);

	return ret_ref;
}
/**
 * async_tx_rebalance - redistribute the available channels, optimize
 * for cpu isolation in the SMP case, and operation isolation in the
 * uniprocessor case
 */
static void async_tx_rebalance(void)
{
	int cpu, cap, cpu_idx = 0;
	unsigned long flags;

	if (!channel_table_initialized)
		return;

	spin_lock_irqsave(&async_tx_lock, flags);

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu) {
			struct dma_chan_ref *ref =
				per_cpu_ptr(channel_table[cap], cpu)->ref;
			if (ref) {
				atomic_set(&ref->count, 0);
				per_cpu_ptr(channel_table[cap], cpu)->ref =
									NULL;
			}
		}

	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			struct dma_chan_ref *new;
			if (NR_CPUS > 1)
				new = get_chan_ref_by_cap(cap, cpu_idx++);
			else
				new = get_chan_ref_by_cap(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->ref = new;
		}

	spin_unlock_irqrestore(&async_tx_lock, flags);
}
static enum dma_state_client
dma_channel_add_remove(struct dma_client *client,
	struct dma_chan *chan, enum dma_state state)
{
	unsigned long found, flags;
	struct dma_chan_ref *master_ref, *ref;
	enum dma_state_client ack = DMA_DUP; /* default: take no action */

	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		found = 0;
		rcu_read_lock();
		list_for_each_entry_rcu(ref, &async_tx_master_list, node)
			if (ref->chan == chan) {
				/* we already know about this channel */
				found = 1;
				break;
			}
		rcu_read_unlock();

		pr_debug("async_tx: dma resource available [%s]\n",
			found ? "old" : "new");

		if (!found)
			ack = DMA_ACK;
		else
			break;

		/* add the channel to the generic management list */
		master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL);
		if (master_ref) {
			/* keep a reference until async_tx is unloaded */
			dma_chan_get(chan);
			init_dma_chan_ref(master_ref, chan);
			spin_lock_irqsave(&async_tx_lock, flags);
			list_add_tail_rcu(&master_ref->node,
				&async_tx_master_list);
			spin_unlock_irqrestore(&async_tx_lock,
				flags);
		} else {
			printk(KERN_WARNING "async_tx: unable to create"
				" new master entry in response to"
				" a DMA_RESOURCE_ADDED event"
				" (-ENOMEM)\n");
			return 0;
		}

		async_tx_rebalance();
		break;
	case DMA_RESOURCE_REMOVED:
		found = 0;
		spin_lock_irqsave(&async_tx_lock, flags);
		list_for_each_entry_rcu(ref, &async_tx_master_list, node)
			if (ref->chan == chan) {
				/* permit backing devices to go away */
				dma_chan_put(ref->chan);
				list_del_rcu(&ref->node);
				call_rcu(&ref->rcu, free_dma_chan_ref);
				found = 1;
				break;
			}
		spin_unlock_irqrestore(&async_tx_lock, flags);

		pr_debug("async_tx: dma resource removed [%s]\n",
			found ? "ours" : "not ours");

		if (found)
			ack = DMA_ACK;
		else
			break;

		async_tx_rebalance();
		break;
	case DMA_RESOURCE_SUSPEND:
	case DMA_RESOURCE_RESUME:
		printk(KERN_WARNING "async_tx: does not support dma channel"
			" suspend/resume\n");
		break;
	default:
		BUG();
	}

	return ack;
}
static int __init async_tx_init(void)
{
	enum dma_transaction_type cap;

	spin_lock_init(&async_tx_lock);
	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* an interrupt will never be an explicit operation type.
	 * clearing this bit prevents allocation to a slot in 'channel_table'
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct chan_ref_percpu);
		if (!channel_table[cap])
			goto err;
	}

	channel_table_initialized = 1;
	dma_async_client_register(&async_tx_dma);
	dma_async_client_chan_request(&async_tx_dma);

	printk(KERN_INFO "async_tx: api initialized (async)\n");

	return 0;
err:
	printk(KERN_ERR "async_tx: initialization failure\n");

	while (--cap >= 0)
		free_percpu(channel_table[cap]);

	return 1;
}
static void __exit async_tx_exit(void)
{
	enum dma_transaction_type cap;

	channel_table_initialized = 0;

	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		if (channel_table[cap])
			free_percpu(channel_table[cap]);

	dma_async_client_unregister(&async_tx_dma);
}
/**
 * async_tx_find_channel - find a channel to carry out the operation or let
 *	the transaction execute synchronously
 * @depend_tx: transaction dependency
 * @tx_type: transaction type
 */
struct dma_chan *
async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
	enum dma_transaction_type tx_type)
{
	/* see if we can keep the chain on one channel */
	if (depend_tx &&
		dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
		return depend_tx->chan;
	else if (likely(channel_table_initialized)) {
		struct dma_chan_ref *ref;
		int cpu = get_cpu();

		ref = per_cpu_ptr(channel_table[tx_type], cpu)->ref;
		put_cpu();

		return ref ? ref->chan : NULL;
	} else
		return NULL;
}
EXPORT_SYMBOL_GPL(async_tx_find_channel);
#else
static int __init async_tx_init(void)
{
	printk(KERN_INFO "async_tx: api initialized (sync-only)\n");
	return 0;
}

static void __exit async_tx_exit(void)
{
	do { } while (0);
}
#endif
void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	tx->callback = cb_fn;
	tx->callback_param = cb_param;

	/* set this new tx to run after depend_tx if:
	 * 1/ a dependency exists (depend_tx is !NULL)
	 * 2/ the tx cannot be submitted to the current channel
	 */
	if (depend_tx && depend_tx->chan != chan) {
		/* if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 */
		BUG_ON(depend_tx->ack);

		tx->parent = depend_tx;
		spin_lock_bh(&depend_tx->lock);
		list_add_tail(&tx->depend_node, &depend_tx->depend_list);
		if (depend_tx->cookie == 0) {
			struct dma_chan *dep_chan = depend_tx->chan;
			struct dma_device *dep_dev = dep_chan->device;
			dep_dev->device_dependency_added(dep_chan);
		}
		spin_unlock_bh(&depend_tx->lock);

		/* schedule an interrupt to trigger the channel switch */
		async_trigger_callback(ASYNC_TX_ACK, depend_tx, NULL, NULL);
	} else {
		tx->parent = NULL;
		tx->tx_submit(tx);
	}

	if (flags & ASYNC_TX_ACK)
		async_tx_ack(tx);

	if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
		async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
/**
 * async_trigger_callback - schedules the callback function to be run after
 * any dependent operations have been completed.
 * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: 'callback' requires the completion of this transaction
 * @cb_fn: function to call after depend_tx completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_trigger_callback(enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *tx;

	if (depend_tx) {
		chan = depend_tx->chan;
		device = chan->device;

		/* see if we can schedule an interrupt
		 * otherwise poll for completion
		 */
		if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
			device = NULL;

		tx = device ? device->device_prep_dma_interrupt(chan) : NULL;
	} else
		tx = NULL;

	if (tx) {
		pr_debug("%s: (async)\n", __FUNCTION__);

		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else {
		pr_debug("%s: (sync)\n", __FUNCTION__);

		/* wait for any prerequisite operations */
		if (depend_tx) {
			/* if ack is already set then we cannot be sure
			 * we are referring to the correct operation
			 */
			BUG_ON(depend_tx->ack);
			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
				panic("%s: DMA_ERROR waiting for depend_tx\n",
					__FUNCTION__);
		}

		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
module_init(async_tx_init);
module_exit(async_tx_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");