/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
10 #include <linux/delay.h>
11 #include <linux/dmaengine.h>
12 #include <linux/init.h>
13 #include <linux/kthread.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/random.h>
17 #include <linux/slab.h>
18 #include <linux/wait.h>
19 #include <linux/amba/xilinx_dma.h>
21 static unsigned int test_buf_size = 64;
22 module_param(test_buf_size, uint, S_IRUGO);
23 MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
25 static char test_channel[20];
26 module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
27 MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
29 static char test_device[20];
30 module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
31 MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
33 static unsigned int threads_per_chan = 1;
34 module_param(threads_per_chan, uint, S_IRUGO);
35 MODULE_PARM_DESC(threads_per_chan,
36 "Number of threads to start per channel (default: 1)");
38 static unsigned int max_channels;
39 module_param(max_channels, uint, S_IRUGO);
40 MODULE_PARM_DESC(max_channels,
41 "Maximum number of channels to use (default: all)");
43 static unsigned int iterations;
44 module_param(iterations, uint, S_IRUGO);
45 MODULE_PARM_DESC(iterations,
46 "Iterations before stopping test (default: infinite)");
48 static unsigned int xor_sources = 3;
49 module_param(xor_sources, uint, S_IRUGO);
50 MODULE_PARM_DESC(xor_sources,
51 "Number of xor source buffers (default: 3)");
53 static unsigned int pq_sources = 3;
54 module_param(pq_sources, uint, S_IRUGO);
55 MODULE_PARM_DESC(pq_sources,
56 "Number of p+q source buffers (default: 3)");
/*
 * Initialization patterns. All bytes in the source buffer has bit 7
 * set, all bytes in the destination buffer has bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
75 struct dmatest_thread {
76 struct list_head node;
77 struct task_struct *task;
78 struct dma_chan *chan;
81 enum dma_transaction_type type;
84 struct dmatest_slave_thread {
85 struct list_head node;
86 struct task_struct *task;
87 struct dma_chan *tx_chan;
88 struct dma_chan *rx_chan;
91 enum dma_transaction_type type;
95 struct list_head node;
96 struct dma_chan *chan;
97 struct list_head threads;
/*
 * These are protected by dma_list_mutex since they're only used by
 * the DMA filter function callback
 */
static LIST_HEAD(dmatest_channels);
static unsigned int nr_channels;
107 static bool dmatest_match_channel(struct dma_chan *chan)
109 if (test_channel[0] == '\0')
111 return strcmp(dma_chan_name(chan), test_channel) == 0;
114 static bool dmatest_match_device(struct dma_device *device)
116 if (test_device[0] == '\0')
118 return strcmp(dev_name(device->dev), test_device) == 0;
/* Cheap random number source for lengths/offsets (not crypto-sensitive). */
static unsigned long dmatest_random(void)
{
	unsigned long buf;

	get_random_bytes(&buf, sizeof(buf));
	return buf;
}
129 static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
134 for (; (buf = *bufs); bufs++) {
135 for (i = 0; i < start; i++)
136 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
137 for ( ; i < start + len; i++)
138 buf[i] = PATTERN_SRC | PATTERN_COPY
139 | (~i & PATTERN_COUNT_MASK);
140 for ( ; i < test_buf_size; i++)
141 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
146 static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
151 for (; (buf = *bufs); bufs++) {
152 for (i = 0; i < start; i++)
153 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
154 for ( ; i < start + len; i++)
155 buf[i] = PATTERN_DST | PATTERN_OVERWRITE
156 | (~i & PATTERN_COUNT_MASK);
157 for ( ; i < test_buf_size; i++)
158 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
162 static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
163 unsigned int counter, bool is_srcbuf)
165 u8 diff = actual ^ pattern;
166 u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
167 const char *thread_name = current->comm;
170 pr_warning("%s: srcbuf[0x%x] overwritten!"
171 " Expected %02x, got %02x\n",
172 thread_name, index, expected, actual);
173 else if ((pattern & PATTERN_COPY)
174 && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
175 pr_warning("%s: dstbuf[0x%x] not copied!"
176 " Expected %02x, got %02x\n",
177 thread_name, index, expected, actual);
178 else if (diff & PATTERN_SRC)
179 pr_warning("%s: dstbuf[0x%x] was copied!"
180 " Expected %02x, got %02x\n",
181 thread_name, index, expected, actual);
183 pr_warning("%s: dstbuf[0x%x] mismatch!"
184 " Expected %02x, got %02x\n",
185 thread_name, index, expected, actual);
188 static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
189 unsigned int end, unsigned int counter, u8 pattern,
193 unsigned int error_count = 0;
197 unsigned int counter_orig = counter;
199 for (; (buf = *bufs); bufs++) {
200 counter = counter_orig;
201 for (i = start; i < end; i++) {
203 expected = pattern | (~counter & PATTERN_COUNT_MASK);
204 if (actual != expected) {
205 if (error_count < 32)
206 dmatest_mismatch(actual, pattern, i,
214 if (error_count > 32)
215 pr_warning("%s: %u errors suppressed\n",
216 current->comm, error_count - 32);
/* Descriptor completion callback: wake the test thread waiting in wait_for_completion_timeout(). */
static void dmatest_callback(void *completion)
{
	complete(completion);
}
/* TX descriptor completion callback for the slave test. (Fixes "callbakc" typo in the debug message.) */
static void dmatest_slave_tx_callback(void *completion)
{
	pr_debug("Got tx callback\n");
	complete(completion);
}
/* RX descriptor completion callback for the slave test. (Fixes "callbakc" typo in the debug message.) */
static void dmatest_slave_rx_callback(void *completion)
{
	pr_debug("Got rx callback\n");
	complete(completion);
}
/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffer is initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
/*
 * Main memcpy/xor/pq test thread: allocate pattern buffers, then loop
 * submitting random-length transfers and verifying the result.
 *
 * NOTE(review): this listing is incomplete — several original lines
 * (declarations of ret/cookie/i/src_cnt/dst_cnt/align, braces and error
 * labels) were dropped by extraction, and each surviving line still
 * carries a stray line-number prefix. Code is kept byte-identical;
 * comments only. Recover the missing lines from the upstream
 * drivers/dma/dmatest.c before building.
 */
252 static int dmatest_func(void *data)
254 struct dmatest_thread *thread = data;
255 struct dma_chan *chan;
256 const char *thread_name;
257 unsigned int src_off, dst_off, len;
258 unsigned int error_count;
259 unsigned int failed_tests = 0;
260 unsigned int total_tests = 0;
262 enum dma_status status;
263 enum dma_ctrl_flags flags;
264 u8 pq_coefs[pq_sources + 1];
270 thread_name = current->comm;
274 /* JZ: limit testing scope here */
/* Pick source/destination buffer counts per operation type. */
279 if (thread->type == DMA_MEMCPY)
280 src_cnt = dst_cnt = 1;
281 else if (thread->type == DMA_XOR) {
282 src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
284 } else if (thread->type == DMA_PQ) {
285 src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
287 for (i = 0; i < src_cnt; i++)
/* Allocate NULL-terminated arrays of source/destination buffers. */
292 thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
295 for (i = 0; i < src_cnt; i++) {
296 thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
297 if (!thread->srcs[i])
300 thread->srcs[i] = NULL;
302 thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
305 for (i = 0; i < dst_cnt; i++) {
306 thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
307 if (!thread->dsts[i])
310 thread->dsts[i] = NULL;
312 set_user_nice(current, 10);
314 flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT;
/* Test loop: one random transfer per iteration. */
316 while (!kthread_should_stop()
317 && !(iterations && total_tests >= iterations)) {
318 struct dma_device *dev = chan->device;
319 struct dma_async_tx_descriptor *tx = NULL;
320 dma_addr_t dma_srcs[src_cnt];
321 dma_addr_t dma_dsts[dst_cnt];
322 struct completion cmp;
323 unsigned long tmo = msecs_to_jiffies(3000);
328 /* honor alignment restrictions */
329 if (thread->type == DMA_MEMCPY)
330 align = dev->copy_align;
331 else if (thread->type == DMA_XOR)
332 align = dev->xor_align;
333 else if (thread->type == DMA_PQ)
334 align = dev->pq_align;
336 if (1 << align > test_buf_size) {
337 pr_err("%u-byte buffer too small for %d-byte alignment\n",
338 test_buf_size, 1 << align);
/* Random length/offsets, rounded down to the device alignment. */
342 len = dmatest_random() % test_buf_size + 1;
343 len = (len >> align) << align;
346 src_off = dmatest_random() % (test_buf_size - len + 1);
347 dst_off = dmatest_random() % (test_buf_size - len + 1);
349 src_off = (src_off >> align) << align;
350 dst_off = (dst_off >> align) << align;
352 dmatest_init_srcs(thread->srcs, src_off, len);
353 dmatest_init_dsts(thread->dsts, dst_off, len);
355 for (i = 0; i < src_cnt; i++) {
356 u8 *buf = thread->srcs[i] + src_off;
358 dma_srcs[i] = dma_map_single(dev->dev, buf, len,
361 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
362 for (i = 0; i < dst_cnt; i++) {
364 dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
/* Build the descriptor for the selected operation type. */
370 if (thread->type == DMA_MEMCPY) {
372 printk(KERN_INFO "To call prep_dma_memcpy %x/%x/%x/%x/%x\n",chan,
373 dma_dsts[0] + dst_off,
377 tx = dev->device_prep_dma_memcpy(chan,
378 dma_dsts[0] + dst_off,
383 else if (thread->type == DMA_XOR)
384 tx = dev->device_prep_dma_xor(chan,
385 dma_dsts[0] + dst_off,
388 else if (thread->type == DMA_PQ) {
389 dma_addr_t dma_pq[dst_cnt];
391 for (i = 0; i < dst_cnt; i++)
392 dma_pq[i] = dma_dsts[i] + dst_off;
393 tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
/* Prep failed: unmap everything and count a failure. */
399 for (i = 0; i < src_cnt; i++)
400 dma_unmap_single(dev->dev, dma_srcs[i], len,
402 for (i = 0; i < dst_cnt; i++)
403 dma_unmap_single(dev->dev, dma_dsts[i],
406 pr_warning("%s: #%u: prep error with src_off=0x%x "
407 "dst_off=0x%x len=0x%x\n",
408 thread_name, total_tests - 1,
409 src_off, dst_off, len);
/* Submit and wait (3 s timeout) for the completion callback. */
415 init_completion(&cmp);
416 tx->callback = dmatest_callback;
417 tx->callback_param = &cmp;
418 cookie = tx->tx_submit(tx);
420 //printk(KERN_INFO "cookie from submit %d\n", cookie);
422 if (dma_submit_error(cookie)) {
423 pr_warning("%s: #%u: submit error %d with src_off=0x%x "
424 "dst_off=0x%x len=0x%x\n",
425 thread_name, total_tests - 1, cookie,
426 src_off, dst_off, len);
431 dma_async_issue_pending(chan);
433 tmo = wait_for_completion_timeout(&cmp, tmo);
434 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
437 pr_warning("%s: #%u: test timed out\n",
438 thread_name, total_tests - 1);
441 } else if (status != DMA_SUCCESS) {
442 pr_warning("%s: #%u: got completion callback,"
443 " but status is \'%s\'\n",
444 thread_name, total_tests - 1,
445 status == DMA_ERROR ? "error" : "in progress");
450 /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
451 for (i = 0; i < dst_cnt; i++)
452 dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size,
/* Verify untouched and copied regions of both buffer sets. */
457 pr_debug("%s: verifying source buffer...\n", thread_name);
458 error_count += dmatest_verify(thread->srcs, 0, src_off,
459 0, PATTERN_SRC, true);
460 error_count += dmatest_verify(thread->srcs, src_off,
461 src_off + len, src_off,
462 PATTERN_SRC | PATTERN_COPY, true);
463 error_count += dmatest_verify(thread->srcs, src_off + len,
464 test_buf_size, src_off + len,
467 pr_debug("%s: verifying dest buffer...\n",
469 error_count += dmatest_verify(thread->dsts, 0, dst_off,
470 0, PATTERN_DST, false);
471 error_count += dmatest_verify(thread->dsts, dst_off,
472 dst_off + len, src_off,
473 PATTERN_SRC | PATTERN_COPY, false);
474 error_count += dmatest_verify(thread->dsts, dst_off + len,
475 test_buf_size, dst_off + len,
479 pr_warning("%s: #%u: %u errors with "
480 "src_off=0x%x dst_off=0x%x len=0x%x\n",
481 thread_name, total_tests - 1, error_count,
482 src_off, dst_off, len);
485 pr_debug("%s: #%u: No errors with "
486 "src_off=0x%x dst_off=0x%x len=0x%x\n",
487 thread_name, total_tests - 1,
488 src_off, dst_off, len);
/* Cleanup: free buffers in reverse allocation order. */
493 for (i = 0; thread->dsts[i]; i++)
494 kfree(thread->dsts[i]);
498 for (i = 0; thread->srcs[i]; i++)
499 kfree(thread->srcs[i]);
503 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
504 thread_name, total_tests, failed_tests, ret);
/* Park until kthread_stop(); interruptible_sleep_on is deprecated/racy — TODO replace. */
507 while (!kthread_should_stop()) {
508 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
509 interruptible_sleep_on(&wait_dmatest_exit);
/*
 * Function for slave transfers.
 * Each thread requires 2 channels: one for transmit and one for receive.
 */
/*
 * Slave (VDMA) loopback test thread: sends frm_cnt frames of
 * hsize * vsize bytes out the TX channel and expects them back on the
 * RX channel, then verifies the patterns.
 *
 * NOTE(review): this listing is incomplete — declarations of
 * vsize/hsize/frm_cnt/len/i/align/ret and several braces/error paths
 * were dropped by extraction, and each surviving line still carries a
 * stray line-number prefix. Code kept byte-identical; comments only.
 */
518 static int dmatest_slave_func(void *data)
520 struct dmatest_slave_thread *thread = data;
521 struct dma_chan *tx_chan;
522 struct dma_chan *rx_chan;
523 const char *thread_name;
525 unsigned int error_count;
526 unsigned int failed_tests = 0;
527 unsigned int total_tests = 0;
528 dma_cookie_t tx_cookie;
529 dma_cookie_t rx_cookie;
530 enum dma_status status;
531 enum dma_ctrl_flags flags;
539 //struct {int direction; int vsize; int hsize;int stride;int num_frms;int frm_dly; int park;int gen_lock;int master;int frm_cnt_en; int park_frm; int coalesc;int delay;} config;
540 struct xilinx_dma_config config;
541 thread_name = current->comm;
545 /* JZ: limit testing scope here */
/* One frame is hsize * vsize bytes; overrides the module parameter. */
547 test_buf_size = hsize * vsize;
550 tx_chan = thread->tx_chan;
551 rx_chan = thread->rx_chan;
/* One source and one destination buffer per frame, NULL-terminated. */
553 thread->srcs = kcalloc(frm_cnt+1, sizeof(u8 *), GFP_KERNEL);
556 for (i = 0; i < frm_cnt; i++) {
557 thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
558 if (!thread->srcs[i])
561 thread->srcs[i] = NULL;
563 thread->dsts = kcalloc(frm_cnt+1, sizeof(u8 *), GFP_KERNEL);
566 for (i = 0; i < frm_cnt; i++) {
567 thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
568 if (!thread->dsts[i])
571 thread->dsts[i] = NULL;
573 set_user_nice(current, 10);
575 flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT;
577 while (!kthread_should_stop()
578 && !(iterations && total_tests >= iterations)) {
579 struct dma_device *tx_dev = tx_chan->device;
580 struct dma_device *rx_dev = rx_chan->device;
581 struct dma_async_tx_descriptor *txd = NULL;
582 struct dma_async_tx_descriptor *rxd = NULL;
583 dma_addr_t dma_srcs[frm_cnt];
584 dma_addr_t dma_dsts[frm_cnt];
585 struct completion rx_cmp;
586 struct completion tx_cmp;
587 unsigned long rx_tmo = msecs_to_jiffies(30000); /* RX takes longer */
588 unsigned long tx_tmo = msecs_to_jiffies(30000);
590 struct scatterlist tx_sg[frm_cnt];
591 struct scatterlist rx_sg[frm_cnt];
595 /* honor larger alignment restrictions */
596 align = tx_dev->copy_align;
597 if (rx_dev->copy_align > align) {
598 align = rx_dev->copy_align;
601 if (1 << align > test_buf_size) {
602 pr_err("%u-byte buffer too small for %d-byte alignment\n",
603 test_buf_size, 1 << align);
608 dmatest_init_srcs(thread->srcs, 0, len);
609 dmatest_init_dsts(thread->dsts, 0, len);
/* Map every frame and build one scatterlist entry per frame. */
611 sg_init_table(tx_sg, frm_cnt);
612 sg_init_table(rx_sg, frm_cnt);
614 for (i = 0; i < frm_cnt; i++) {
615 u8 *buf = thread->srcs[i];
617 dma_srcs[i] = dma_map_single(tx_dev->dev, buf, len,
619 pr_debug("src buf %x dma %x\n", (unsigned int)buf, dma_srcs[i]);
620 sg_dma_address(&tx_sg[i]) = dma_srcs[i];
621 sg_dma_len(&tx_sg[i]) = len;
624 for (i = 0; i < frm_cnt; i++) {
626 dma_dsts[i] = dma_map_single(rx_dev->dev, thread->dsts[i],
629 pr_debug("dst %x dma %x\n", thread->dsts[i], dma_dsts[i]);
630 sg_dma_address(&rx_sg[i]) = dma_dsts[i];
631 sg_dma_len(&rx_sg[i]) = len;
634 /* Set up hardware configuration information */
635 config.direction = DMA_TO_DEVICE;
636 config.vsize = vsize;
637 config.hsize = hsize;
638 config.stride = hsize;
639 config.frm_cnt_en = 1;
640 config.coalesc = frm_cnt * 10;
642 /* The following is do-not-care, need to set to 0 */
648 config.disable_intr = 0;
649 tx_dev->device_control(tx_chan, DMA_SLAVE_CONFIG, (unsigned long)&config);
/* Same config for RX, only the direction flips. */
651 config.direction = DMA_FROM_DEVICE;
653 rx_dev->device_control(rx_chan, DMA_SLAVE_CONFIG, (unsigned long)&config);
655 rxd = rx_dev->device_prep_slave_sg(rx_chan, rx_sg, frm_cnt,
656 DMA_FROM_DEVICE, flags);
658 txd = tx_dev->device_prep_slave_sg(tx_chan, tx_sg, frm_cnt,
659 DMA_TO_DEVICE, flags);
/* Prep failed: unmap both sides and count a failure. */
662 for (i = 0; i < frm_cnt; i++)
663 dma_unmap_single(tx_dev->dev, dma_srcs[i], len,
665 for (i = 0; i < frm_cnt; i++)
666 dma_unmap_single(rx_dev->dev, dma_dsts[i],
669 pr_warning("%s: #%u: prep error with len=0x%x ",
670 thread_name, total_tests - 1, len);
/* Submit RX first so the receiver is armed before TX starts. */
676 init_completion(&rx_cmp);
677 rxd->callback = dmatest_slave_rx_callback;
678 rxd->callback_param = &rx_cmp;
679 rx_cookie = rxd->tx_submit(rxd);
681 init_completion(&tx_cmp);
682 txd->callback = dmatest_slave_tx_callback;
683 txd->callback_param = &tx_cmp;
684 tx_cookie = txd->tx_submit(txd);
686 //printk(KERN_INFO "cookie from submit rx %d tx %d\n", rx_cookie,
689 if (dma_submit_error(rx_cookie) || dma_submit_error(tx_cookie)) {
690 pr_warning("%s: #%u: submit error %d/%d with len=0x%x ",
691 thread_name, total_tests - 1, rx_cookie, tx_cookie, len);
696 dma_async_issue_pending(tx_chan);
697 dma_async_issue_pending(rx_chan);
/* Wait for TX, then RX, each with a 30 s timeout. */
699 tx_tmo = wait_for_completion_timeout(&tx_cmp, tx_tmo);
701 status = dma_async_is_tx_complete(tx_chan, tx_cookie, NULL, NULL);
704 pr_warning("%s: #%u: tx test timed out\n",
705 thread_name, total_tests - 1);
708 } else if (status != DMA_SUCCESS) {
709 pr_warning("%s: #%u: tx got completion callback,"
710 " but status is \'%s\'\n",
711 thread_name, total_tests - 1,
712 status == DMA_ERROR ? "error" : "in progress");
717 rx_tmo = wait_for_completion_timeout(&rx_cmp, rx_tmo);
718 status = dma_async_is_tx_complete(rx_chan, rx_cookie, NULL, NULL);
721 pr_warning("%s: #%u: rx test timed out\n",
722 thread_name, total_tests - 1);
725 } else if (status != DMA_SUCCESS) {
726 pr_warning("%s: #%u: rx got completion callback,"
727 " but status is \'%s\'\n",
728 thread_name, total_tests - 1,
729 status == DMA_ERROR ? "error" : "in progress");
734 /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
735 for (i = 0; i < frm_cnt; i++)
736 dma_unmap_single(rx_dev->dev, dma_dsts[i], test_buf_size,
/* Verify: srcs must be untouched, dsts must carry the copied pattern. */
741 pr_debug("%s: verifying source buffer...\n", thread_name);
742 error_count += dmatest_verify(thread->srcs, 0, 0,
743 0, PATTERN_SRC, true);
744 error_count += dmatest_verify(thread->srcs, 0,
745 len, 0, PATTERN_SRC | PATTERN_COPY, true);
746 error_count += dmatest_verify(thread->srcs, len,
747 test_buf_size, len, PATTERN_SRC, true);
749 pr_debug("%s: verifying dest buffer...\n",
751 error_count += dmatest_verify(thread->dsts, 0, 0,
752 0, PATTERN_DST, false);
753 error_count += dmatest_verify(thread->dsts, 0,
754 len, 0, PATTERN_SRC | PATTERN_COPY, false);
755 error_count += dmatest_verify(thread->dsts, len,
756 test_buf_size, len, PATTERN_DST, false);
759 pr_warning("%s: #%u: %u errors with len=0x%x\n",
760 thread_name, total_tests - 1, error_count, len);
763 pr_debug("%s: #%u: No errors with len=0x%x\n",
764 thread_name, total_tests - 1, len);
/* Cleanup: free buffers in reverse allocation order. */
769 for (i = 0; thread->dsts[i]; i++)
770 kfree(thread->dsts[i]);
774 for (i = 0; thread->srcs[i]; i++)
775 kfree(thread->srcs[i]);
779 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
780 thread_name, total_tests, failed_tests, ret);
/* Park until kthread_stop(); interruptible_sleep_on is deprecated/racy — TODO replace. */
783 while (!kthread_should_stop()) {
784 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
785 interruptible_sleep_on(&wait_dmatest_exit);
791 static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
793 struct dmatest_thread *thread;
794 struct dmatest_thread *_thread;
797 list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
798 ret = kthread_stop(thread->task);
799 pr_debug("dmatest: thread %s exited with status %d\n",
800 thread->task->comm, ret);
801 list_del(&thread->node);
807 static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type)
809 struct dmatest_thread *thread;
810 struct dma_chan *chan = dtc->chan;
814 if (type == DMA_MEMCPY)
816 else if (type == DMA_XOR)
818 else if (type == DMA_PQ)
823 for (i = 0; i < threads_per_chan; i++) {
824 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
826 pr_warning("dmatest: No memory for %s-%s%u\n",
827 dma_chan_name(chan), op, i);
831 thread->chan = dtc->chan;
834 thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
835 dma_chan_name(chan), op, i);
836 if (IS_ERR(thread->task)) {
837 pr_warning("dmatest: Failed to run thread %s-%s%u\n",
838 dma_chan_name(chan), op, i);
843 /* srcbuf and dstbuf are allocated by the thread itself */
845 list_add_tail(&thread->node, &dtc->threads);
851 static int dmatest_add_slave_threads(struct dmatest_chan *tx_dtc, struct dmatest_chan *rx_dtc)
853 struct dmatest_slave_thread *thread;
854 struct dma_chan *tx_chan = tx_dtc->chan;
855 struct dma_chan *rx_chan = rx_dtc->chan;
857 thread = kzalloc(sizeof(struct dmatest_slave_thread), GFP_KERNEL);
859 pr_warning("dmatest: No memory for slave thread %s-%s\n",
860 dma_chan_name(tx_chan), dma_chan_name(rx_chan));
864 thread->tx_chan = tx_chan;
865 thread->rx_chan = rx_chan;
866 thread->type = (enum dma_transaction_type)DMA_SLAVE;
868 thread->task = kthread_run(dmatest_slave_func, thread, "%s-%s",
869 dma_chan_name(tx_chan), dma_chan_name(rx_chan));
870 if (IS_ERR(thread->task)) {
871 pr_warning("dmatest: Failed to run thread %s-%s\n",
872 dma_chan_name(tx_chan), dma_chan_name(rx_chan));
876 /* srcbuf and dstbuf are allocated by the thread itself */
878 list_add_tail(&thread->node, &tx_dtc->threads);
880 /* Added one thread with 2 channels */
884 static int dmatest_add_channel(struct dma_chan *chan)
886 struct dmatest_chan *dtc;
887 struct dma_device *dma_dev = chan->device;
888 unsigned int thread_count = 0;
891 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
893 pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
898 INIT_LIST_HEAD(&dtc->threads);
900 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
901 cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
902 thread_count += cnt > 0 ? cnt : 0;
904 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
905 cnt = dmatest_add_threads(dtc, DMA_XOR);
906 thread_count += cnt > 0 ? cnt : 0;
908 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
909 cnt = dmatest_add_threads(dtc, DMA_PQ);
910 thread_count += cnt > 0 ?: 0;
913 pr_info("dmatest: Started %u threads using %s\n",
914 thread_count, dma_chan_name(chan));
916 list_add_tail(&dtc->node, &dmatest_channels);
922 static int dmatest_add_slave_channels(struct dma_chan *tx_chan, struct dma_chan *rx_chan)
924 struct dmatest_chan *tx_dtc;
925 struct dmatest_chan *rx_dtc;
926 unsigned int thread_count = 0;
928 tx_dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
930 pr_warning("dmatest: No memory for tx %s\n", dma_chan_name(tx_chan));
934 rx_dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
936 pr_warning("dmatest: No memory for rx %s\n", dma_chan_name(rx_chan));
940 tx_dtc->chan = tx_chan;
941 rx_dtc->chan = rx_chan;
942 INIT_LIST_HEAD(&tx_dtc->threads);
943 INIT_LIST_HEAD(&rx_dtc->threads);
945 dmatest_add_slave_threads(tx_dtc, rx_dtc);
948 pr_info("dmatest: Started %u threads using %s %s\n",
949 thread_count, dma_chan_name(tx_chan), dma_chan_name(rx_chan));
951 list_add_tail(&tx_dtc->node, &dmatest_channels);
952 list_add_tail(&rx_dtc->node, &dmatest_channels);
958 static bool filter(struct dma_chan *chan, void *param)
960 if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device))
966 static bool xdma_filter(struct dma_chan *chan, void *param)
968 if (*((int *)chan->private) == *(int *)param) {
/*
 * Module init: grab DMA_MEMCPY channels for the generic test, then a
 * Xilinx VDMA TX/RX pair (matched by direction | XILINX_DMA_IP_VDMA)
 * for the slave loopback test.
 *
 * NOTE(review): this listing is incomplete — declarations of mask/err/
 * match, the channel-request loop skeleton and several braces were
 * dropped by extraction, and each surviving line still carries a stray
 * line-number prefix. Code kept byte-identical; comments only.
 */
975 static int __init dmatest_init(void)
978 struct dma_chan *chan;
/* Phase 1: claim memcpy-capable channels until max_channels is hit. */
982 dma_cap_set(DMA_MEMCPY, mask);
984 chan = dma_request_channel(mask, filter, NULL);
987 err = dmatest_add_channel(chan);
989 dma_release_channel(chan);
990 break; /* add_channel failed, punt */
993 break; /* no more channels available */
994 if (max_channels && nr_channels >= max_channels)
995 break; /* we have all we need */
/* Phase 2: look for a Xilinx VDMA TX/RX channel pair. */
1000 enum dma_data_direction direction;
1002 struct dma_chan *rx_chan;
1005 dma_cap_set(DMA_SLAVE | DMA_PRIVATE, mask);
1007 direction = DMA_TO_DEVICE;
1008 match = (direction & 0xFF) | XILINX_DMA_IP_VDMA;
1009 pr_info("match is %x\n", match);
1011 chan = dma_request_channel(mask, xdma_filter, (void *)&match);
1014 pr_info("Found tx device\n");
1016 pr_info("Did not find tx device\n");
1019 direction = DMA_FROM_DEVICE;
1020 match = (direction & 0xFF) | XILINX_DMA_IP_VDMA;
1021 rx_chan = dma_request_channel(mask, xdma_filter, &match);
1024 pr_info("Found rx device\n");
1026 pr_info("Did not find rx device\n");
/* Only run the slave test when both directions were found. */
1029 if(chan && rx_chan) {
1030 err = dmatest_add_slave_channels(chan, rx_chan);
1032 dma_release_channel(chan);
1033 dma_release_channel(rx_chan);
1040 /* when compiled-in wait for drivers to load first */
1041 late_initcall(dmatest_init);
1043 static void __exit dmatest_exit(void)
1045 struct dmatest_chan *dtc, *_dtc;
1046 struct dma_chan *chan;
1048 list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
1049 list_del(&dtc->node);
1051 dmatest_cleanup_channel(dtc);
1052 pr_debug("dmatest: dropped channel %s\n",
1053 dma_chan_name(chan));
1054 dma_release_channel(chan);
1057 module_exit(dmatest_exit);
1059 MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
1060 MODULE_LICENSE("GPL v2");