drivers/dma/dmaengine.c
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected
 * by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it needs an exclusive channel it can call
 * dma_request_channel().  Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */
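/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * dmaengine.c.  Under the locking rules described above, it shows how a
 * client might use the non-exclusive interface: dmaengine_get() to register
 * interest, dma_find_channel() to pick a per-cpu memcpy channel, and
 * dmaengine_put() when finished.  example_memcpy_client() is a hypothetical
 * name.
 */
#if 0
static void example_memcpy_client(void)
{
	struct dma_chan *chan;

	dmaengine_get();	/* take a reference on all public channels */

	chan = dma_find_channel(DMA_MEMCPY);
	if (chan) {
		/* issue offloaded copies on @chan, e.g. via
		 * dma_async_memcpy_buf_to_buf(), then flush them */
		dma_async_issue_pending(chan);
	}

	dmaengine_put();	/* drop the reference when finished */
}
#endif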
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}
static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};
/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);
/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/*
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
		return NULL;

	return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);
	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}
/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}
static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}
/**
 * dma_get_slave_channel - try to get a specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		err = dma_chan_get(chan);
		if (err)
			pr_debug("%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);
/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n",
					 __func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
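/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * Callers normally go through the dma_request_channel() wrapper in
 * linux/dmaengine.h rather than calling __dma_request_channel() directly.
 * The filter function and the example_* names below are hypothetical; a
 * real filter matches @chan against driver-specific data in @param.
 */
#if 0
static bool example_filter(struct dma_chan *chan, void *param)
{
	/* accept any channel offered by the allocator */
	return true;
}

static void example_exclusive_client(void)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, example_filter, NULL);
	if (chan) {
		/* use the channel exclusively ... */
		dma_release_channel(chan);
	}
}
#endif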
/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 */
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return NULL;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
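/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * A slave driver typically asks for its channels by the names used in its
 * device tree or ACPI description; "tx" and "rx" below are assumed names,
 * and example_probe_channels() is a hypothetical helper.
 */
#if 0
static int example_probe_channels(struct device *dev)
{
	struct dma_chan *tx = dma_request_slave_channel(dev, "tx");
	struct dma_chan *rx = dma_request_slave_channel(dev, "rx");

	if (!tx || !rx) {
		if (tx)
			dma_release_channel(tx);
		if (rx)
			dma_release_channel(rx);
		return -ENODEV;
	}
	/* configure and use the channels, release them on remove() */
	return 0;
}
#endif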
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);
static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}
/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
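/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * It outlines the minimum a provider driver supplies before calling
 * dma_async_device_register(), per the BUG_ON() checks above: a struct
 * device, the four mandatory callbacks, and a prep routine for every
 * advertised capability.  struct example_dma and the example_* callbacks
 * are hypothetical.
 */
#if 0
static int example_register_provider(struct example_dma *edma)
{
	struct dma_device *dd = &edma->dma;

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dd->device_prep_dma_memcpy	= example_prep_memcpy;
	dd->device_alloc_chan_resources	= example_alloc_chan_resources;
	dd->device_free_chan_resources	= example_free_chan_resources;
	dd->device_tx_status		= example_tx_status;
	dd->device_issue_pending	= example_issue_pending;
	dd->dev = edma->dev;

	INIT_LIST_HEAD(&dd->channels);
	/* add each struct dma_chan to dd->channels here */

	return dma_async_device_register(dd);
}
#endif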
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines, dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);
struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		if (p->pool)
			mempool_destroy(p->pool);
		p->pool = NULL;
		if (p->cache)
			kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	unsigned long flags;

	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
	if (!unmap)
		return -ENOMEM;

	unmap->to_cnt = 1;
	unmap->from_cnt = 1;
	unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
				      DMA_TO_DEVICE);
	unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
				      DMA_FROM_DEVICE);
	unmap->len = len;
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
					 len, flags);

	if (!tx) {
		dmaengine_unmap_put(unmap);
		return -ENOMEM;
	}

	dma_set_unmap(tx, unmap);
	cookie = tx->tx_submit(tx);
	dmaengine_unmap_put(unmap);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			    void *src, size_t len)
{
	return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
					 (unsigned long) dest & ~PAGE_MASK,
					 virt_to_page(src),
					 (unsigned long) src & ~PAGE_MASK, len);
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
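/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * It shows the submit-then-wait pattern for the offloaded memcpy helpers
 * above: the returned cookie (or a negative errno) is handed to
 * dma_sync_wait(), which issues pending work and spins until the copy
 * completes.  example_sync_copy() is a hypothetical helper.
 */
#if 0
static int example_sync_copy(struct dma_chan *chan, void *dst, void *src,
			     size_t len)
{
	dma_cookie_t cookie;

	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	if (cookie < 0)
		return cookie;	/* e.g. -ENOMEM from the unmap pool */

	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		return -EIO;

	return 0;
}
#endif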
/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			   unsigned int offset, void *kdata, size_t len)
{
	return dma_async_memcpy_pg_to_pg(chan, page, offset,
					 virt_to_page(kdata),
					 (unsigned long) kdata & ~PAGE_MASK, len);
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate
					  * the loop
					  */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);