/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, int, S_IRUGO);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout");

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
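
/*
 * Worked example of the encoding above: the source byte at offset
 * i = 3, inside the region that is to be copied, is initialized to
 *
 *	PATTERN_SRC | PATTERN_COPY | (~3 & PATTERN_COUNT_MASK)
 *	   = 0x80   |     0x40     |          0x1c             = 0xdc
 */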

struct dmatest_thread {
	struct list_head	node;
	struct task_struct	*task;
	struct dma_chan		*chan;
	u8			**srcs;
	u8			**dsts;
	enum dma_transaction_type type;
};

struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};

/*
 * These are protected by dma_list_mutex since they're only used by
 * the DMA filter function callback
 */
static LIST_HEAD(dmatest_channels);
static unsigned int nr_channels;

static bool dmatest_match_channel(struct dma_chan *chan)
{
	if (test_channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), test_channel) == 0;
}

static bool dmatest_match_device(struct dma_device *device)
{
	if (test_device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), test_device) == 0;
}

static unsigned long dmatest_random(void)
{
	unsigned long buf;

	get_random_bytes(&buf, sizeof(buf));
	return buf;
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_SRC | PATTERN_COPY
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < test_buf_size; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
	}
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_DST | PATTERN_OVERWRITE
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < test_buf_size; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
	}
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf)
{
	u8		diff = actual ^ pattern;
	u8		expected = pattern | (~counter & PATTERN_COUNT_MASK);
	const char	*thread_name = current->comm;

	if (is_srcbuf)
		pr_warning("%s: srcbuf[0x%x] overwritten!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warning("%s: dstbuf[0x%x] not copied!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warning("%s: dstbuf[0x%x] was copied!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
	else
		pr_warning("%s: dstbuf[0x%x] mismatch!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | (~counter & PATTERN_COUNT_MASK);
			if (actual != expected) {
				if (error_count < 32)
					dmatest_mismatch(actual, pattern, i,
							counter, is_srcbuf);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > 32)
		pr_warning("%s: %u errors suppressed\n",
			current->comm, error_count - 32);

	return error_count;
}

/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;
	wait_queue_head_t	*wait;
};
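
/*
 * A struct completion would provide the same signalling, but
 * wait_for_completion() is not freezable; the open-coded flag plus
 * wait_queue_head_t lets the test thread block in
 * wait_event_freezable_timeout() instead.
 */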

static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}

static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
			     unsigned int count)
{
	while (count--)
		dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
}

static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
			     unsigned int count)
{
	while (count--)
		dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
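
/*
 * Sketch of the source buffer layout for one test iteration, where
 * cnt stands for (~i & PATTERN_COUNT_MASK); the destination buffer is
 * analogous with PATTERN_DST/PATTERN_OVERWRITE:
 *
 *	0 ........ src_off ........... src_off+len ......... test_buf_size
 *	| SRC|cnt | SRC|COPY|cnt     | SRC|cnt             |
 */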

static int dmatest_func(void *data)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct dmatest_thread	*thread = data;
	struct dmatest_done	done = { .wait = &done_wait };
	struct dma_chan		*chan;
	const char		*thread_name;
	unsigned int		src_off, dst_off, len;
	unsigned int		error_count;
	unsigned int		failed_tests = 0;
	unsigned int		total_tests = 0;
	dma_cookie_t		cookie;
	enum dma_status		status;
	enum dma_ctrl_flags	flags;
	u8			pq_coefs[pq_sources + 1];
	int			ret;
	int			src_cnt;
	int			dst_cnt;
	int			i;

	thread_name = current->comm;
	set_freezable();

	ret = -ENOMEM;

	smp_rmb();
	chan = thread->chan;
	if (thread->type == DMA_MEMCPY)
		src_cnt = dst_cnt = 1;
	else if (thread->type == DMA_XOR) {
		src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
		dst_cnt = 1;
	} else if (thread->type == DMA_PQ) {
		src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
		dst_cnt = 2;
		for (i = 0; i < src_cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_srcs;

	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->srcs)
		goto err_srcs;
	for (i = 0; i < src_cnt; i++) {
		thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
		if (!thread->srcs[i])
			goto err_srcbuf;
	}
	thread->srcs[i] = NULL;

	thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->dsts)
		goto err_dsts;
	for (i = 0; i < dst_cnt; i++) {
		thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
		if (!thread->dsts[i])
			goto err_dstbuf;
	}
	thread->dsts[i] = NULL;

	set_user_nice(current, 10);

	/*
	 * src buffers are freed by the DMAEngine code with dma_unmap_single()
	 * dst buffers are freed by ourselves below
	 */
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
	      | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;

	while (!kthread_should_stop()
	       && !(iterations && total_tests >= iterations)) {
		struct dma_device *dev = chan->device;
		struct dma_async_tx_descriptor *tx = NULL;
		dma_addr_t dma_srcs[src_cnt];
		dma_addr_t dma_dsts[dst_cnt];
		u8 align = 0;

		total_tests++;

		/* honor alignment restrictions */
		if (thread->type == DMA_MEMCPY)
			align = dev->copy_align;
		else if (thread->type == DMA_XOR)
			align = dev->xor_align;
		else if (thread->type == DMA_PQ)
			align = dev->pq_align;

		if (1 << align > test_buf_size) {
			pr_err("%u-byte buffer too small for %d-byte alignment\n",
			       test_buf_size, 1 << align);
			break;
		}

		len = dmatest_random() % test_buf_size + 1;
		len = (len >> align) << align;
		if (!len)
			len = 1 << align;
		src_off = dmatest_random() % (test_buf_size - len + 1);
		dst_off = dmatest_random() % (test_buf_size - len + 1);

		src_off = (src_off >> align) << align;
		dst_off = (dst_off >> align) << align;

		dmatest_init_srcs(thread->srcs, src_off, len);
		dmatest_init_dsts(thread->dsts, dst_off, len);

		for (i = 0; i < src_cnt; i++) {
			u8 *buf = thread->srcs[i] + src_off;

			dma_srcs[i] = dma_map_single(dev->dev, buf, len,
						     DMA_TO_DEVICE);
			ret = dma_mapping_error(dev->dev, dma_srcs[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, i);
				pr_warn("%s: #%u: mapping error %d with "
					"src_off=0x%x len=0x%x\n",
					thread_name, total_tests - 1, ret,
					src_off, len);
				failed_tests++;
				continue;
			}
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		for (i = 0; i < dst_cnt; i++) {
			dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
						     test_buf_size,
						     DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dma_dsts[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, src_cnt);
				unmap_dst(dev->dev, dma_dsts, test_buf_size, i);
				pr_warn("%s: #%u: mapping error %d with "
					"dst_off=0x%x len=0x%x\n",
					thread_name, total_tests - 1, ret,
					dst_off, test_buf_size);
				failed_tests++;
				continue;
			}
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dma_dsts[0] + dst_off,
							 dma_srcs[0], len,
							 flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dma_dsts[0] + dst_off,
						      dma_srcs, src_cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			dma_addr_t dma_pq[dst_cnt];

			for (i = 0; i < dst_cnt; i++)
				dma_pq[i] = dma_dsts[i] + dst_off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
						     src_cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			unmap_src(dev->dev, dma_srcs, len, src_cnt);
			unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
			pr_warning("%s: #%u: prep error with src_off=0x%x "
					"dst_off=0x%x len=0x%x\n",
					thread_name, total_tests - 1,
					src_off, dst_off, len);
			msleep(100);
			failed_tests++;
			continue;
		}

		done.done = false;
		tx->callback = dmatest_callback;
		tx->callback_param = &done;
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			pr_warning("%s: #%u: submit error %d with src_off=0x%x "
					"dst_off=0x%x len=0x%x\n",
					thread_name, total_tests - 1, cookie,
					src_off, dst_off, len);
			msleep(100);
			failed_tests++;
			continue;
		}
		dma_async_issue_pending(chan);

		wait_event_freezable_timeout(done_wait, done.done,
					     msecs_to_jiffies(timeout));

		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (!done.done) {
			/*
			 * We're leaving the timed out dma operation with
			 * dangling pointer to done_wait. To make this
			 * correct, we'll need to allocate wait_done for
			 * each test iteration and perform "who's gonna
			 * free it this time?" dancing. For now, just
			 * leave it dangling.
			 */
			pr_warning("%s: #%u: test timed out\n",
				   thread_name, total_tests - 1);
			failed_tests++;
			continue;
		} else if (status != DMA_SUCCESS) {
			pr_warning("%s: #%u: got completion callback,"
				   " but status is \'%s\'\n",
				   thread_name, total_tests - 1,
				   status == DMA_ERROR ? "error" : "in progress");
			failed_tests++;
			continue;
		}

		/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
		unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);

		error_count = 0;

		pr_debug("%s: verifying source buffer...\n", thread_name);
		error_count += dmatest_verify(thread->srcs, 0, src_off,
				0, PATTERN_SRC, true);
		error_count += dmatest_verify(thread->srcs, src_off,
				src_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, true);
		error_count += dmatest_verify(thread->srcs, src_off + len,
				test_buf_size, src_off + len,
				PATTERN_SRC, true);

		pr_debug("%s: verifying dest buffer...\n",
				thread->task->comm);
		error_count += dmatest_verify(thread->dsts, 0, dst_off,
				0, PATTERN_DST, false);
		error_count += dmatest_verify(thread->dsts, dst_off,
				dst_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, false);
		error_count += dmatest_verify(thread->dsts, dst_off + len,
				test_buf_size, dst_off + len,
				PATTERN_DST, false);

		if (error_count) {
			pr_warning("%s: #%u: %u errors with "
				"src_off=0x%x dst_off=0x%x len=0x%x\n",
				thread_name, total_tests - 1, error_count,
				src_off, dst_off, len);
			failed_tests++;
		} else {
			pr_debug("%s: #%u: No errors with "
				"src_off=0x%x dst_off=0x%x len=0x%x\n",
				thread_name, total_tests - 1,
				src_off, dst_off, len);
		}
	}

	ret = 0;
	for (i = 0; thread->dsts[i]; i++)
		kfree(thread->dsts[i]);
err_dstbuf:
	kfree(thread->dsts);
err_dsts:
	for (i = 0; thread->srcs[i]; i++)
		kfree(thread->srcs[i]);
err_srcbuf:
	kfree(thread->srcs);
err_srcs:
	pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
			thread_name, total_tests, failed_tests, ret);

	/* terminate all transfers on specified channels */
	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
	if (iterations > 0)
		while (!kthread_should_stop()) {
			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
			interruptible_sleep_on(&wait_dmatest_exit);
		}

	return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread	*thread;
	struct dmatest_thread	*_thread;
	int			ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("dmatest: thread %s exited with status %d\n",
				thread->task->comm, ret);
		list_del(&thread->node);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);

	kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warning("dmatest: No memory for %s-%s%u\n",
				   dma_chan_name(chan), op, i);
			break;
		}
		thread->chan = dtc->chan;
		thread->type = type;
		smp_wmb();
		thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warning("dmatest: Failed to run thread %s-%s%u\n",
					dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */

		list_add_tail(&thread->node, &dtc->threads);
	}

	return i;
}

static int dmatest_add_channel(struct dma_chan *chan)
{
	struct dmatest_chan	*dtc;
	struct dma_device	*dma_dev = chan->device;
	unsigned int		thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("dmatest: Started %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &dmatest_channels);
	nr_channels++;

	return 0;
}

static bool filter(struct dma_chan *chan, void *param)
{
	if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device))
		return false;
	else
		return true;
}

static int __init dmatest_init(void)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	int err = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	for (;;) {
		chan = dma_request_channel(mask, filter, NULL);
		if (chan) {
			err = dmatest_add_channel(chan);
			if (err) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (max_channels && nr_channels >= max_channels)
			break; /* we have all we need */
	}

	return err;
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
	struct dmatest_chan *dtc, *_dtc;
	struct dma_chan *chan;

	list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
		list_del(&dtc->node);
		chan = dtc->chan;
		dmatest_cleanup_channel(dtc);
		pr_debug("dmatest: dropped channel %s\n",
			 dma_chan_name(chan));
		dma_release_channel(chan);
	}
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");