/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO);
MODULE_PARM_DESC(threads_per_chan,
        "Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO);
MODULE_PARM_DESC(max_channels,
        "Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO);
MODULE_PARM_DESC(iterations,
        "Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO);
MODULE_PARM_DESC(xor_sources,
        "Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO);
MODULE_PARM_DESC(pq_sources,
        "Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, uint, S_IRUGO);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
         "Pass -1 for infinite timeout");
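/*
 * Example module invocation (an illustrative sketch, not taken from this
 * file): when built as a module, the test can be aimed at a single channel
 * and bounded to a few iterations, e.g.
 *
 *     modprobe dmatest channel=dma0chan0 iterations=5 timeout=2000
 *
 * "dma0chan0" is a hypothetical channel name; real names are whatever
 * dma_chan_name() reports for the platform's DMA devices. Leaving "channel"
 * and "device" empty lets the test claim any capable channel.
 */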
/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
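/*
 * Worked example of the scheme above (illustrative only): the source byte
 * at address i = 5 inside the to-be-copied region is initialized to
 * PATTERN_SRC | PATTERN_COPY | (~5 & PATTERN_COUNT_MASK)
 *   = 0x80 | 0x40 | 0x1a = 0xda,
 * while the destination byte at the same counter value in the overwrite
 * region starts out as PATTERN_DST | PATTERN_OVERWRITE | 0x1a = 0x3a.
 * After a correct copy the destination must carry the source value, so any
 * byte in the copied window still showing bit 7 clear is a mismatch.
 */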
enum dmatest_error_type {
    DMATEST_ET_OK,
    DMATEST_ET_MAP_SRC,
    DMATEST_ET_MAP_DST,
    DMATEST_ET_PREP,
    DMATEST_ET_SUBMIT,
    DMATEST_ET_TIMEOUT,
    DMATEST_ET_DMA_ERROR,
    DMATEST_ET_DMA_IN_PROGRESS,
    DMATEST_ET_VERIFY,
    DMATEST_ET_VERIFY_BUF,
};

struct dmatest_verify_buffer {
    unsigned int	index;
    u8			expected;
    u8			actual;
};

struct dmatest_verify_result {
    unsigned int			error_count;
    struct dmatest_verify_buffer	data[MAX_ERROR_COUNT];
    u8					pattern;
    bool				is_srcbuf;
};
struct dmatest_thread_result {
    struct list_head	node;
    unsigned int	n;
    unsigned int	src_off;
    unsigned int	dst_off;
    unsigned int	len;
    enum dmatest_error_type type;
    union {
        unsigned long			data;
        enum dma_status			status;
        struct dmatest_verify_result	*vr;
    };
};

struct dmatest_result {
    struct list_head	node;
    char		*name;
    struct list_head	results;
};
struct dmatest_thread {
    struct list_head	node;
    struct dmatest_info	*info;
    struct task_struct	*task;
    struct dma_chan	*chan;
    u8			**srcs;
    u8			**dsts;
    enum dma_transaction_type type;
    bool		done;
};

struct dmatest_chan {
    struct list_head	node;
    struct dma_chan	*chan;
    struct list_head	threads;
};
/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 */
struct dmatest_params {
    unsigned int	buf_size;
    char		channel[20];
    char		device[20];
    unsigned int	threads_per_chan;
    unsigned int	max_channels;
    unsigned int	iterations;
    unsigned int	xor_sources;
    unsigned int	pq_sources;
    int			timeout;
};

/**
 * struct dmatest_info - test information.
 * @params:	test parameters
 * @lock:	access protection to the fields of this structure
 */
struct dmatest_info {
    /* Test parameters */
    struct dmatest_params	params;

    /* Internal state */
    struct list_head		channels;
    unsigned int		nr_channels;
    struct mutex		lock;

    /* debugfs related stuff */
    struct dentry		*root;
    struct dmatest_params	dbgfs_params;

    /* Test results */
    struct list_head		results;
    struct mutex		results_lock;
};

static struct dmatest_info test_info;
static bool dmatest_match_channel(struct dmatest_params *params,
        struct dma_chan *chan)
{
    if (params->channel[0] == '\0')
        return true;
    return strcmp(dma_chan_name(chan), params->channel) == 0;
}

static bool dmatest_match_device(struct dmatest_params *params,
        struct dma_device *device)
{
    if (params->device[0] == '\0')
        return true;
    return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
    unsigned long buf;

    get_random_bytes(&buf, sizeof(buf));
    return buf;
}
static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
        unsigned int buf_size)
{
    unsigned int i;
    u8 *buf;

    for (; (buf = *bufs); bufs++) {
        for (i = 0; i < start; i++)
            buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
        for ( ; i < start + len; i++)
            buf[i] = PATTERN_SRC | PATTERN_COPY
                | (~i & PATTERN_COUNT_MASK);
        for ( ; i < buf_size; i++)
            buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
    }
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
        unsigned int buf_size)
{
    unsigned int i;
    u8 *buf;

    for (; (buf = *bufs); bufs++) {
        for (i = 0; i < start; i++)
            buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
        for ( ; i < start + len; i++)
            buf[i] = PATTERN_DST | PATTERN_OVERWRITE
                | (~i & PATTERN_COUNT_MASK);
        for ( ; i < buf_size; i++)
            buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
    }
}
static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
        unsigned int start, unsigned int end, unsigned int counter,
        u8 pattern, bool is_srcbuf)
{
    unsigned int i;
    unsigned int error_count = 0;
    u8 actual;
    u8 expected;
    u8 *buf;
    unsigned int counter_orig = counter;
    struct dmatest_verify_buffer *vb;

    for (; (buf = *bufs); bufs++) {
        counter = counter_orig;
        for (i = start; i < end; i++) {
            actual = buf[i];
            expected = pattern | (~counter & PATTERN_COUNT_MASK);
            if (actual != expected) {
                if (error_count < MAX_ERROR_COUNT && vr) {
                    vb = &vr->data[error_count];
                    vb->index = i;
                    vb->expected = expected;
                    vb->actual = actual;
                }
                error_count++;
            }
            counter++;
        }
    }

    if (error_count > MAX_ERROR_COUNT)
        pr_warning("%s: %u errors suppressed\n",
               current->comm, error_count - MAX_ERROR_COUNT);

    return error_count;
}
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
    bool		done;
    wait_queue_head_t	*wait;
};

static void dmatest_callback(void *arg)
{
    struct dmatest_done *done = arg;

    done->done = true;
    wake_up_all(done->wait);
}

static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
                 unsigned int count)
{
    while (count--)
        dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
}

static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
                 unsigned int count)
{
    while (count--)
        dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
}

/* largest odd number not bigger than min(x, y) */
static unsigned int min_odd(unsigned int x, unsigned int y)
{
    unsigned int val = min(x, y);

    return val % 2 ? val : val - 1;
}
static char *verify_result_get_one(struct dmatest_verify_result *vr,
        unsigned int i)
{
    struct dmatest_verify_buffer *vb = &vr->data[i];
    u8 diff = vb->actual ^ vr->pattern;
    static char buf[512];
    char *msg;

    if (vr->is_srcbuf)
        msg = "srcbuf overwritten!";
    else if ((vr->pattern & PATTERN_COPY)
            && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
        msg = "dstbuf not copied!";
    else if (diff & PATTERN_SRC)
        msg = "dstbuf was copied!";
    else
        msg = "dstbuf mismatch!";

    snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg,
         vb->index, vb->expected, vb->actual);

    return buf;
}
static char *thread_result_get(const char *name,
        struct dmatest_thread_result *tr)
{
    static const char * const messages[] = {
        [DMATEST_ET_OK]			= "No errors",
        [DMATEST_ET_MAP_SRC]		= "src mapping error",
        [DMATEST_ET_MAP_DST]		= "dst mapping error",
        [DMATEST_ET_PREP]		= "prep error",
        [DMATEST_ET_SUBMIT]		= "submit error",
        [DMATEST_ET_TIMEOUT]		= "test timed out",
        [DMATEST_ET_DMA_ERROR]		=
            "got completion callback (DMA_ERROR)",
        [DMATEST_ET_DMA_IN_PROGRESS]	=
            "got completion callback (DMA_IN_PROGRESS)",
        [DMATEST_ET_VERIFY]		= "errors",
        [DMATEST_ET_VERIFY_BUF]		= "verify errors",
    };
    static char buf[512];

    snprintf(buf, sizeof(buf) - 1,
         "%s: #%u: %s with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
         name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
         tr->len, tr->data);

    return buf;
}
static int thread_result_add(struct dmatest_info *info,
        struct dmatest_result *r, enum dmatest_error_type type,
        unsigned int n, unsigned int src_off, unsigned int dst_off,
        unsigned int len, unsigned long data)
{
    struct dmatest_thread_result *tr;

    tr = kzalloc(sizeof(*tr), GFP_KERNEL);
    if (!tr)
        return -ENOMEM;

    tr->type = type;
    tr->n = n;
    tr->src_off = src_off;
    tr->dst_off = dst_off;
    tr->len = len;
    tr->data = data;

    mutex_lock(&info->results_lock);
    list_add_tail(&tr->node, &r->results);
    mutex_unlock(&info->results_lock);

    pr_warn("%s\n", thread_result_get(r->name, tr));
    return 0;
}
static unsigned int verify_result_add(struct dmatest_info *info,
        struct dmatest_result *r, unsigned int n,
        unsigned int src_off, unsigned int dst_off, unsigned int len,
        u8 **bufs, int whence, unsigned int counter, u8 pattern,
        bool is_srcbuf)
{
    struct dmatest_verify_result *vr;
    unsigned int error_count;
    unsigned int buf_off = is_srcbuf ? src_off : dst_off;
    unsigned int start, end;

    if (whence < 0) {
        start = 0;
        end = buf_off;
    } else if (whence > 0) {
        start = buf_off + len;
        end = info->params.buf_size;
    } else {
        start = buf_off;
        end = buf_off + len;
    }

    vr = kmalloc(sizeof(*vr), GFP_KERNEL);
    if (!vr) {
        pr_warn("dmatest: No memory to store verify result\n");
        return dmatest_verify(NULL, bufs, start, end, counter, pattern,
                      is_srcbuf);
    }

    vr->pattern = pattern;
    vr->is_srcbuf = is_srcbuf;

    error_count = dmatest_verify(vr, bufs, start, end, counter, pattern,
                     is_srcbuf);
    if (error_count) {
        vr->error_count = error_count;
        thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off,
                  dst_off, len, (unsigned long)vr);
        return error_count;
    }

    kfree(vr);
    return 0;
}
static void result_free(struct dmatest_info *info, const char *name)
{
    struct dmatest_result *r, *_r;

    mutex_lock(&info->results_lock);
    list_for_each_entry_safe(r, _r, &info->results, node) {
        struct dmatest_thread_result *tr, *_tr;

        if (name && strcmp(r->name, name))
            continue;

        list_for_each_entry_safe(tr, _tr, &r->results, node) {
            if (tr->type == DMATEST_ET_VERIFY_BUF)
                kfree(tr->vr);
            list_del(&tr->node);
            kfree(tr);
        }

        kfree(r->name);
        list_del(&r->node);
        kfree(r);
    }

    mutex_unlock(&info->results_lock);
}

static struct dmatest_result *result_init(struct dmatest_info *info,
        const char *name)
{
    struct dmatest_result *r;

    r = kzalloc(sizeof(*r), GFP_KERNEL);
    if (r) {
        r->name = kstrdup(name, GFP_KERNEL);
        INIT_LIST_HEAD(&r->results);
        mutex_lock(&info->results_lock);
        list_add_tail(&r->node, &info->results);
        mutex_unlock(&info->results_lock);
    }
    return r;
}
/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
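/*
 * Per-iteration outline (a summary of the loop below, not additional
 * behaviour): pick a random length and random source/destination offsets
 * honouring the device's alignment, fill the buffers with the patterns
 * above, DMA-map the sources and destinations, prepare and submit a
 * memcpy/xor/pq descriptor, wait for the completion callback (bounded by
 * the "timeout" parameter), then compare every byte of every buffer
 * against the expected pattern and record the outcome.
 */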
static int dmatest_func(void *data)
{
    DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
    struct dmatest_thread	*thread = data;
    struct dmatest_done	done = { .wait = &done_wait };
    struct dmatest_info	*info;
    struct dmatest_params	*params;
    struct dma_chan		*chan;
    struct dma_device	*dev;
    const char		*thread_name;
    unsigned int		src_off, dst_off, len;
    unsigned int		error_count;
    unsigned int		failed_tests = 0;
    unsigned int		total_tests = 0;
    dma_cookie_t		cookie;
    enum dma_status		status;
    enum dma_ctrl_flags	flags;
    u8			*pq_coefs = NULL;
    int			ret;
    int			src_cnt;
    int			dst_cnt;
    int			i;
    struct dmatest_result	*result;

    thread_name = current->comm;
    set_freezable();

    ret = -ENOMEM;

    smp_rmb();
    info = thread->info;
    params = &info->params;
    chan = thread->chan;
    dev = chan->device;
    if (thread->type == DMA_MEMCPY)
        src_cnt = dst_cnt = 1;
    else if (thread->type == DMA_XOR) {
        /* force odd to ensure dst = src */
        src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
        dst_cnt = 1;
    } else if (thread->type == DMA_PQ) {
        /* force odd to ensure dst = src */
        src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
        dst_cnt = 2;

        pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
        if (!pq_coefs)
            goto err_thread_type;

        for (i = 0; i < src_cnt; i++)
            pq_coefs[i] = 1;
    } else
        goto err_thread_type;
    result = result_init(info, thread_name);
    if (!result)
        goto err_srcs;

    thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
    if (!thread->srcs)
        goto err_srcs;
    for (i = 0; i < src_cnt; i++) {
        thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
        if (!thread->srcs[i])
            goto err_srcbuf;
    }
    thread->srcs[i] = NULL;

    thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
    if (!thread->dsts)
        goto err_dsts;
    for (i = 0; i < dst_cnt; i++) {
        thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
        if (!thread->dsts[i])
            goto err_dstbuf;
    }
    thread->dsts[i] = NULL;

    set_user_nice(current, 10);

    /*
     * src buffers are freed by the DMAEngine code with dma_unmap_single()
     * dst buffers are freed by ourselves below
     */
    flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
          | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
    while (!kthread_should_stop()
           && !(params->iterations && total_tests >= params->iterations)) {
        struct dma_async_tx_descriptor *tx = NULL;
        dma_addr_t dma_srcs[src_cnt];
        dma_addr_t dma_dsts[dst_cnt];
        u8 align = 0;

        total_tests++;

        /* honor alignment restrictions */
        if (thread->type == DMA_MEMCPY)
            align = dev->copy_align;
        else if (thread->type == DMA_XOR)
            align = dev->xor_align;
        else if (thread->type == DMA_PQ)
            align = dev->pq_align;

        if (1 << align > params->buf_size) {
            pr_err("%u-byte buffer too small for %d-byte alignment\n",
                   params->buf_size, 1 << align);
            break;
        }

        len = dmatest_random() % params->buf_size + 1;
        len = (len >> align) << align;
        if (!len)
            len = 1 << align;
        src_off = dmatest_random() % (params->buf_size - len + 1);
        dst_off = dmatest_random() % (params->buf_size - len + 1);

        src_off = (src_off >> align) << align;
        dst_off = (dst_off >> align) << align;

        dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
        dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);
        for (i = 0; i < src_cnt; i++) {
            u8 *buf = thread->srcs[i] + src_off;

            dma_srcs[i] = dma_map_single(dev->dev, buf, len,
                             DMA_TO_DEVICE);
            ret = dma_mapping_error(dev->dev, dma_srcs[i]);
            if (ret) {
                unmap_src(dev->dev, dma_srcs, len, i);
                thread_result_add(info, result,
                          DMATEST_ET_MAP_SRC,
                          total_tests, src_off, dst_off,
                          len, ret);
                failed_tests++;
                continue;
            }
        }
        /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
        for (i = 0; i < dst_cnt; i++) {
            dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
                             params->buf_size,
                             DMA_BIDIRECTIONAL);
            ret = dma_mapping_error(dev->dev, dma_dsts[i]);
            if (ret) {
                unmap_src(dev->dev, dma_srcs, len, src_cnt);
                unmap_dst(dev->dev, dma_dsts, params->buf_size,
                      i);
                thread_result_add(info, result,
                          DMATEST_ET_MAP_DST,
                          total_tests, src_off, dst_off,
                          len, ret);
                failed_tests++;
                continue;
            }
        }

        if (thread->type == DMA_MEMCPY)
            tx = dev->device_prep_dma_memcpy(chan,
                             dma_dsts[0] + dst_off,
                             dma_srcs[0], len,
                             flags);
        else if (thread->type == DMA_XOR)
            tx = dev->device_prep_dma_xor(chan,
                              dma_dsts[0] + dst_off,
                              dma_srcs, src_cnt,
                              len, flags);
        else if (thread->type == DMA_PQ) {
            dma_addr_t dma_pq[dst_cnt];

            for (i = 0; i < dst_cnt; i++)
                dma_pq[i] = dma_dsts[i] + dst_off;
            tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
                             src_cnt, pq_coefs,
                             len, flags);
        }

        if (!tx) {
            unmap_src(dev->dev, dma_srcs, len, src_cnt);
            unmap_dst(dev->dev, dma_dsts, params->buf_size,
                  dst_cnt);
            thread_result_add(info, result, DMATEST_ET_PREP,
                      total_tests, src_off, dst_off,
                      len, 0);
            msleep(100);
            failed_tests++;
            continue;
        }
->callback
= dmatest_callback
;
706 tx
->callback_param
= &done
;
707 cookie
= tx
->tx_submit(tx
);
709 if (dma_submit_error(cookie
)) {
710 thread_result_add(info
, result
, DMATEST_ET_SUBMIT
,
711 total_tests
, src_off
, dst_off
,
717 dma_async_issue_pending(chan
);
719 wait_event_freezable_timeout(done_wait
, done
.done
,
720 msecs_to_jiffies(params
->timeout
));
722 status
= dma_async_is_tx_complete(chan
, cookie
, NULL
, NULL
);
726 * We're leaving the timed out dma operation with
727 * dangling pointer to done_wait. To make this
728 * correct, we'll need to allocate wait_done for
729 * each test iteration and perform "who's gonna
730 * free it this time?" dancing. For now, just
733 thread_result_add(info
, result
, DMATEST_ET_TIMEOUT
,
734 total_tests
, src_off
, dst_off
,
738 } else if (status
!= DMA_SUCCESS
) {
739 enum dmatest_error_type type
= (status
== DMA_ERROR
) ?
740 DMATEST_ET_DMA_ERROR
: DMATEST_ET_DMA_IN_PROGRESS
;
741 thread_result_add(info
, result
, type
,
742 total_tests
, src_off
, dst_off
,
        /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
        unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);

        error_count = 0;

        pr_debug("%s: verifying source buffer...\n", thread_name);
        error_count += verify_result_add(info, result, total_tests,
                src_off, dst_off, len, thread->srcs, -1,
                0, PATTERN_SRC, true);
        error_count += verify_result_add(info, result, total_tests,
                src_off, dst_off, len, thread->srcs, 0,
                src_off, PATTERN_SRC | PATTERN_COPY, true);
        error_count += verify_result_add(info, result, total_tests,
                src_off, dst_off, len, thread->srcs, 1,
                src_off + len, PATTERN_SRC, true);

        pr_debug("%s: verifying dest buffer...\n", thread_name);
        error_count += verify_result_add(info, result, total_tests,
                src_off, dst_off, len, thread->dsts, -1,
                0, PATTERN_DST, false);
        error_count += verify_result_add(info, result, total_tests,
                src_off, dst_off, len, thread->dsts, 0,
                src_off, PATTERN_SRC | PATTERN_COPY, false);
        error_count += verify_result_add(info, result, total_tests,
                src_off, dst_off, len, thread->dsts, 1,
                dst_off + len, PATTERN_DST, false);

        if (error_count) {
            thread_result_add(info, result, DMATEST_ET_VERIFY,
                      total_tests, src_off, dst_off,
                      len, error_count);
            failed_tests++;
        } else {
            thread_result_add(info, result, DMATEST_ET_OK,
                      total_tests, src_off, dst_off,
                      len, 0);
        }
    }
    ret = 0;
    for (i = 0; thread->dsts[i]; i++)
        kfree(thread->dsts[i]);
err_dstbuf:
    kfree(thread->dsts);
err_dsts:
    for (i = 0; thread->srcs[i]; i++)
        kfree(thread->srcs[i]);
err_srcbuf:
    kfree(thread->srcs);
err_srcs:
    kfree(pq_coefs);
err_thread_type:
    pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
            thread_name, total_tests, failed_tests, ret);

    /* terminate all transfers on specified channels */
    if (ret)
        dmaengine_terminate_all(chan);

    thread->done = true;

    if (params->iterations > 0)
        while (!kthread_should_stop()) {
            DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
            interruptible_sleep_on(&wait_dmatest_exit);
        }

    return ret;
}
static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
    struct dmatest_thread	*thread;
    struct dmatest_thread	*_thread;
    int			ret;

    list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
        ret = kthread_stop(thread->task);
        pr_debug("dmatest: thread %s exited with status %d\n",
             thread->task->comm, ret);
        list_del(&thread->node);
        kfree(thread);
    }

    /* terminate all transfers on specified channels */
    dmaengine_terminate_all(dtc->chan);

    kfree(dtc);
}
static int dmatest_add_threads(struct dmatest_info *info,
        struct dmatest_chan *dtc, enum dma_transaction_type type)
{
    struct dmatest_params *params = &info->params;
    struct dmatest_thread *thread;
    struct dma_chan *chan = dtc->chan;
    char *op;
    unsigned int i;

    if (type == DMA_MEMCPY)
        op = "copy";
    else if (type == DMA_XOR)
        op = "xor";
    else if (type == DMA_PQ)
        op = "pq";
    else
        return -EINVAL;

    for (i = 0; i < params->threads_per_chan; i++) {
        thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
        if (!thread) {
            pr_warning("dmatest: No memory for %s-%s%u\n",
                   dma_chan_name(chan), op, i);
            break;
        }
        thread->info = info;
        thread->chan = dtc->chan;
        thread->type = type;
        smp_wmb();
        thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
                dma_chan_name(chan), op, i);
        if (IS_ERR(thread->task)) {
            pr_warning("dmatest: Failed to run thread %s-%s%u\n",
                    dma_chan_name(chan), op, i);
            kfree(thread);
            break;
        }

        /* srcbuf and dstbuf are allocated by the thread itself */

        list_add_tail(&thread->node, &dtc->threads);
    }

    return i;
}
static int dmatest_add_channel(struct dmatest_info *info,
        struct dma_chan *chan)
{
    struct dmatest_chan	*dtc;
    struct dma_device	*dma_dev = chan->device;
    unsigned int		thread_count = 0;
    int cnt;

    dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
    if (!dtc) {
        pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
        return -ENOMEM;
    }

    dtc->chan = chan;
    INIT_LIST_HEAD(&dtc->threads);

    if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
        cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
        thread_count += cnt > 0 ? cnt : 0;
    }
    if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
        cnt = dmatest_add_threads(info, dtc, DMA_XOR);
        thread_count += cnt > 0 ? cnt : 0;
    }
    if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
        cnt = dmatest_add_threads(info, dtc, DMA_PQ);
        thread_count += cnt > 0 ? cnt : 0;
    }

    pr_info("dmatest: Started %u threads using %s\n",
        thread_count, dma_chan_name(chan));

    list_add_tail(&dtc->node, &info->channels);
    info->nr_channels++;

    return 0;
}
static bool filter(struct dma_chan *chan, void *param)
{
    struct dmatest_params *params = param;

    if (!dmatest_match_channel(params, chan) ||
        !dmatest_match_device(params, chan->device))
        return false;
    else
        return true;
}

static int __run_threaded_test(struct dmatest_info *info)
{
    dma_cap_mask_t mask;
    struct dma_chan *chan;
    struct dmatest_params *params = &info->params;
    int err = 0;

    dma_cap_zero(mask);
    dma_cap_set(DMA_MEMCPY, mask);
    for (;;) {
        chan = dma_request_channel(mask, filter, params);
        if (chan) {
            err = dmatest_add_channel(info, chan);
            if (err) {
                dma_release_channel(chan);
                break; /* add_channel failed, punt */
            }
        } else
            break; /* no more channels available */
        if (params->max_channels &&
            info->nr_channels >= params->max_channels)
            break; /* we have all we need */
    }
    return err;
}

static int run_threaded_test(struct dmatest_info *info)
{
    int ret;

    mutex_lock(&info->lock);
    ret = __run_threaded_test(info);
    mutex_unlock(&info->lock);
    return ret;
}
static void __stop_threaded_test(struct dmatest_info *info)
{
    struct dmatest_chan *dtc, *_dtc;
    struct dma_chan *chan;

    list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
        list_del(&dtc->node);
        chan = dtc->chan;
        dmatest_cleanup_channel(dtc);
        pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
        dma_release_channel(chan);
    }

    info->nr_channels = 0;
}

static void stop_threaded_test(struct dmatest_info *info)
{
    mutex_lock(&info->lock);
    __stop_threaded_test(info);
    mutex_unlock(&info->lock);
}
static int __restart_threaded_test(struct dmatest_info *info, bool run)
{
    struct dmatest_params *params = &info->params;

    /* Stop any running test first */
    __stop_threaded_test(info);

    if (run == false)
        return 0;

    /* Clear results from previous run */
    result_free(info, NULL);

    /* Copy test parameters */
    memcpy(params, &info->dbgfs_params, sizeof(*params));

    /* Run test with new parameters */
    return __run_threaded_test(info);
}

static bool __is_threaded_test_run(struct dmatest_info *info)
{
    struct dmatest_chan *dtc;

    list_for_each_entry(dtc, &info->channels, node) {
        struct dmatest_thread *thread;

        list_for_each_entry(thread, &dtc->threads, node) {
            if (!thread->done)
                return true;
        }
    }

    return false;
}
static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
        const void __user *from, size_t count)
{
    char tmp[20];
    ssize_t len;

    len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
    if (len < 0)
        return len;

    tmp[len] = '\0';
    strlcpy(to, strim(tmp), available);

    return len;
}

static ssize_t dtf_read_channel(struct file *file, char __user *buf,
        size_t count, loff_t *ppos)
{
    struct dmatest_info *info = file->private_data;

    return simple_read_from_buffer(buf, count, ppos,
            info->dbgfs_params.channel,
            strlen(info->dbgfs_params.channel));
}
static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
        size_t size, loff_t *ppos)
{
    struct dmatest_info *info = file->private_data;

    return dtf_write_string(info->dbgfs_params.channel,
                sizeof(info->dbgfs_params.channel),
                ppos, buf, size);
}

static const struct file_operations dtf_channel_fops = {
    .read	= dtf_read_channel,
    .write	= dtf_write_channel,
    .open	= simple_open,
    .llseek	= default_llseek,
};
static ssize_t dtf_read_device(struct file *file, char __user *buf,
        size_t count, loff_t *ppos)
{
    struct dmatest_info *info = file->private_data;

    return simple_read_from_buffer(buf, count, ppos,
            info->dbgfs_params.device,
            strlen(info->dbgfs_params.device));
}

static ssize_t dtf_write_device(struct file *file, const char __user *buf,
        size_t size, loff_t *ppos)
{
    struct dmatest_info *info = file->private_data;

    return dtf_write_string(info->dbgfs_params.device,
                sizeof(info->dbgfs_params.device),
                ppos, buf, size);
}

static const struct file_operations dtf_device_fops = {
    .read	= dtf_read_device,
    .write	= dtf_write_device,
    .open	= simple_open,
    .llseek	= default_llseek,
};
static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
        size_t count, loff_t *ppos)
{
    struct dmatest_info *info = file->private_data;
    char buf[3];

    mutex_lock(&info->lock);

    if (__is_threaded_test_run(info)) {
        buf[0] = 'Y';
    } else {
        __stop_threaded_test(info);
        buf[0] = 'N';
    }

    mutex_unlock(&info->lock);
    buf[1] = '\n';
    buf[2] = 0x00;
    return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
        size_t count, loff_t *ppos)
{
    struct dmatest_info *info = file->private_data;
    char buf[16];
    bool bv;
    int ret = 0;

    if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
        return -EFAULT;

    if (strtobool(buf, &bv) == 0) {
        mutex_lock(&info->lock);

        if (__is_threaded_test_run(info))
            ret = -EBUSY;
        else
            ret = __restart_threaded_test(info, bv);

        mutex_unlock(&info->lock);
    }

    return ret ? ret : count;
}
static const struct file_operations dtf_run_fops = {
    .read	= dtf_read_run,
    .write	= dtf_write_run,
    .open	= simple_open,
    .llseek	= default_llseek,
};
static int dtf_results_show(struct seq_file *sf, void *data)
{
    struct dmatest_info *info = sf->private;
    struct dmatest_result *result;
    struct dmatest_thread_result *tr;
    unsigned int i;

    mutex_lock(&info->results_lock);
    list_for_each_entry(result, &info->results, node) {
        list_for_each_entry(tr, &result->results, node) {
            seq_printf(sf, "%s\n",
                thread_result_get(result->name, tr));
            if (tr->type == DMATEST_ET_VERIFY_BUF) {
                for (i = 0; i < tr->vr->error_count; i++) {
                    seq_printf(sf, "\t%s\n",
                        verify_result_get_one(tr->vr, i));
                }
            }
        }
    }

    mutex_unlock(&info->results_lock);
    return 0;
}
static int dtf_results_open(struct inode *inode, struct file *file)
{
    return single_open(file, dtf_results_show, inode->i_private);
}

static const struct file_operations dtf_results_fops = {
    .open		= dtf_results_open,
    .read		= seq_read,
    .llseek		= seq_lseek,
    .release	= single_release,
};
static int dmatest_register_dbgfs(struct dmatest_info *info)
{
    struct dentry *d;
    struct dmatest_params *params = &info->dbgfs_params;
    int ret = -ENOMEM;

    d = debugfs_create_dir("dmatest", NULL);
    if (IS_ERR_OR_NULL(d))
        goto err_root;
    info->root = d;

    /* Copy initial values */
    memcpy(params, &info->params, sizeof(*params));

    /* Test parameters */

    d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
                   (u32 *)&params->buf_size);
    if (IS_ERR_OR_NULL(d))
        goto err_node;

    d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
                info, &dtf_channel_fops);
    if (IS_ERR_OR_NULL(d))
        goto err_node;

    d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
                info, &dtf_device_fops);
    if (IS_ERR_OR_NULL(d))
        goto err_node;

    d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
                   (u32 *)&params->threads_per_chan);
    if (IS_ERR_OR_NULL(d))
        goto err_node;

    d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
                   (u32 *)&params->max_channels);
    if (IS_ERR_OR_NULL(d))
        goto err_node;

    d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
                   (u32 *)&params->iterations);
    if (IS_ERR_OR_NULL(d))
        goto err_node;

    d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
                   (u32 *)&params->xor_sources);
    if (IS_ERR_OR_NULL(d))
        goto err_node;

    d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
                   (u32 *)&params->pq_sources);
    if (IS_ERR_OR_NULL(d))
        goto err_node;

    d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
                   (u32 *)&params->timeout);
    if (IS_ERR_OR_NULL(d))
        goto err_node;

    /* Run or stop threaded test */
    d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
                info, &dtf_run_fops);
    if (IS_ERR_OR_NULL(d))
        goto err_node;

    /* Results of test in progress */
    d = debugfs_create_file("results", S_IRUGO, info->root, info,
                &dtf_results_fops);
    if (IS_ERR_OR_NULL(d))
        goto err_node;

    return 0;

err_node:
    debugfs_remove_recursive(info->root);
err_root:
    pr_err("dmatest: Failed to initialize debugfs\n");
    return ret;
}
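/*
 * Example debugfs session (illustrative only; paths assume debugfs is
 * mounted at /sys/kernel/debug, and the channel name is hypothetical):
 *
 *     echo dma0chan0 > /sys/kernel/debug/dmatest/channel
 *     echo 1 > /sys/kernel/debug/dmatest/iterations
 *     echo 1 > /sys/kernel/debug/dmatest/run
 *     cat /sys/kernel/debug/dmatest/results
 *
 * Writing "1" to "run" restarts the test with the parameters currently
 * held in dbgfs_params; reading "run" reports whether any test thread is
 * still running.
 */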
static int __init dmatest_init(void)
{
    struct dmatest_info *info = &test_info;
    struct dmatest_params *params = &info->params;
    int ret;

    memset(info, 0, sizeof(*info));

    mutex_init(&info->lock);
    INIT_LIST_HEAD(&info->channels);

    mutex_init(&info->results_lock);
    INIT_LIST_HEAD(&info->results);

    /* Set default parameters */
    params->buf_size = test_buf_size;
    strlcpy(params->channel, test_channel, sizeof(params->channel));
    strlcpy(params->device, test_device, sizeof(params->device));
    params->threads_per_chan = threads_per_chan;
    params->max_channels = max_channels;
    params->iterations = iterations;
    params->xor_sources = xor_sources;
    params->pq_sources = pq_sources;
    params->timeout = timeout;

    ret = dmatest_register_dbgfs(info);
    if (ret)
        return ret;

    return run_threaded_test(info);
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);
static void __exit dmatest_exit(void)
{
    struct dmatest_info *info = &test_info;

    debugfs_remove_recursive(info->root);
    stop_threaded_test(info);
    result_free(info, NULL);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");