2 * linux/drivers/mmc/card/mmc_test.c
4 * Copyright 2007-2008 Pierre Ossman
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
12 #include <linux/mmc/core.h>
13 #include <linux/mmc/card.h>
14 #include <linux/mmc/host.h>
15 #include <linux/mmc/mmc.h>
16 #include <linux/slab.h>
18 #include <linux/scatterlist.h>
19 #include <linux/swap.h> /* For nr_free_buffer_pages() */
20 #include <linux/list.h>
22 #include <linux/debugfs.h>
23 #include <linux/uaccess.h>
24 #include <linux/seq_file.h>
/* Test result codes for conditions the host/card cannot support. */
28 #define RESULT_UNSUP_HOST 2
29 #define RESULT_UNSUP_CARD 3
/* Transfer buffer is PAGE_SIZE << 2, i.e. four pages. */
31 #define BUFFER_ORDER 2
32 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
/*
35 * Limit the test area size to the maximum MMC HC erase group size. Note that
36 * the maximum SD allocation unit size is just 4MiB.
 */
38 #define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
/*
41 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
42 * @page: first page in the allocation
43 * @order: order of the number of pages allocated
 */
/* NOTE(review): the member declarations are missing from this extraction. */
45 struct mmc_test_pages
{
/*
51 * struct mmc_test_mem - allocated memory.
52 * @arr: array of allocations
53 * @cnt: number of allocations
 */
/* NOTE(review): struct header and the @cnt member are missing here. */
56 struct mmc_test_pages
*arr
;
/*
61 * struct mmc_test_area - information for performance tests.
62 * @max_sz: test area size (in bytes)
63 * @dev_addr: address on card at which to do performance tests
64 * @max_tfr: maximum transfer size allowed by driver (in bytes)
65 * @max_segs: maximum segments allowed by driver in scatterlist @sg
66 * @max_seg_sz: maximum segment size allowed by driver
67 * @blocks: number of (512 byte) blocks currently mapped by @sg
68 * @sg_len: length of currently mapped scatterlist @sg
69 * @mem: allocated memory
 */
/*
 * NOTE(review): several members described above (max_sz, max_tfr, blocks,
 * sg_len) are missing from this extraction.
 */
72 struct mmc_test_area
{
74 unsigned int dev_addr
;
76 unsigned int max_segs
;
77 unsigned int max_seg_sz
;
80 struct mmc_test_mem
*mem
;
81 struct scatterlist
*sg
;
/*
85 * struct mmc_test_transfer_result - transfer results for performance tests.
86 * @link: double-linked list
87 * @count: amount of group of sectors to check
88 * @sectors: amount of sectors to check in one group
89 * @ts: time values of transfer
90 * @rate: calculated transfer rate
 */
/* NOTE(review): only the @link member survived this extraction. */
92 struct mmc_test_transfer_result
{
93 struct list_head link
;
/*
101 * struct mmc_test_general_result - results for tests.
102 * @link: double-linked list
103 * @card: card under test
104 * @testcase: number of test case
105 * @result: result of test run
106 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
/* NOTE(review): @testcase and @result members are missing here. */
108 struct mmc_test_general_result
{
109 struct list_head link
;
110 struct mmc_card
*card
;
113 struct list_head tr_lst
;
/*
117 * struct mmc_test_dbgfs_file - debugfs related file.
118 * @link: double-linked list
119 * @card: card under test
120 * @file: file created under debugfs
 */
/* NOTE(review): the @file member is missing from this extraction. */
122 struct mmc_test_dbgfs_file
{
123 struct list_head link
;
124 struct mmc_card
*card
;
/*
129 * struct mmc_test_card - test information.
130 * @card: card under test
131 * @scratch: transfer buffer
132 * @buffer: transfer buffer
133 * @highmem: buffer for highmem tests
134 * @area: information for performance tests
135 * @gr: pointer to results of current testcase
 */
/* NOTE(review): the @buffer member is missing from this extraction. */
137 struct mmc_test_card
{
138 struct mmc_card
*card
;
140 u8 scratch
[BUFFER_SIZE
];
142 #ifdef CONFIG_HIGHMEM
143 struct page
*highmem
;
145 struct mmc_test_area area
;
146 struct mmc_test_general_result
*gr
;
149 /*******************************************************************/
150 /* General helper functions */
151 /*******************************************************************/
154 * Configure correct block size in card
156 static int mmc_test_set_blksize(struct mmc_test_card
*test
, unsigned size
)
158 return mmc_set_blocklen(test
->card
, size
);
/*
162 * Fill in the mmc_request structure given a set of transfer parameters.
 */
/*
 * NOTE(review): original line numbers are non-contiguous below — the
 * branch that selects single- vs multi-block opcodes, the block-address
 * shift, and the stop-command setup condition are missing from this
 * extraction.  Code left byte-identical.
 */
164 static void mmc_test_prepare_mrq(struct mmc_test_card
*test
,
165 struct mmc_request
*mrq
, struct scatterlist
*sg
, unsigned sg_len
,
166 unsigned dev_addr
, unsigned blocks
, unsigned blksz
, int write
)
168 BUG_ON(!mrq
|| !mrq
->cmd
|| !mrq
->data
|| !mrq
->stop
);
/* Multi-block opcode pair. */
171 mrq
->cmd
->opcode
= write
?
172 MMC_WRITE_MULTIPLE_BLOCK
: MMC_READ_MULTIPLE_BLOCK
;
/* Single-block opcode pair. */
174 mrq
->cmd
->opcode
= write
?
175 MMC_WRITE_BLOCK
: MMC_READ_SINGLE_BLOCK
;
178 mrq
->cmd
->arg
= dev_addr
;
/* Byte-addressed cards presumably need the arg scaled — body missing. */
179 if (!mmc_card_blockaddr(test
->card
))
182 mrq
->cmd
->flags
= MMC_RSP_R1
| MMC_CMD_ADTC
;
/* Stop command (CMD12) setup for multi-block transfers. */
187 mrq
->stop
->opcode
= MMC_STOP_TRANSMISSION
;
189 mrq
->stop
->flags
= MMC_RSP_R1B
| MMC_CMD_AC
;
192 mrq
->data
->blksz
= blksz
;
193 mrq
->data
->blocks
= blocks
;
194 mrq
->data
->flags
= write
? MMC_DATA_WRITE
: MMC_DATA_READ
;
196 mrq
->data
->sg_len
= sg_len
;
198 mmc_set_data_timeout(mrq
->data
, test
->card
);
201 static int mmc_test_busy(struct mmc_command
*cmd
)
203 return !(cmd
->resp
[0] & R1_READY_FOR_DATA
) ||
204 (R1_CURRENT_STATE(cmd
->resp
[0]) == 7);
/*
208 * Wait for the card to finish the busy state
 */
/*
 * NOTE(review): the do-loop head, the 'busy'/'ret' declarations, the
 * busy-flag update and the return are missing from this extraction.
 */
210 static int mmc_test_wait_busy(struct mmc_test_card
*test
)
213 struct mmc_command cmd
;
217 memset(&cmd
, 0, sizeof(struct mmc_command
));
/* Poll card status with CMD13 (SEND_STATUS). */
219 cmd
.opcode
= MMC_SEND_STATUS
;
220 cmd
.arg
= test
->card
->rca
<< 16;
221 cmd
.flags
= MMC_RSP_R1
| MMC_CMD_AC
;
223 ret
= mmc_wait_for_cmd(test
->card
->host
, &cmd
, 0);
/* Warn once if the host claims to wait for busy but the card is busy. */
227 if (!busy
&& mmc_test_busy(&cmd
)) {
229 if (test
->card
->host
->caps
& MMC_CAP_WAIT_WHILE_BUSY
)
230 printk(KERN_INFO
"%s: Warning: Host did not "
231 "wait for busy state to end.\n",
232 mmc_hostname(test
->card
->host
));
234 } while (mmc_test_busy(&cmd
));
/*
240 * Transfer a single sector of kernel addressable data
 */
/*
 * NOTE(review): the lines wiring cmd/stop/data into mrq, the error checks
 * after the request, and the final return are missing from this extraction.
 */
242 static int mmc_test_buffer_transfer(struct mmc_test_card
*test
,
243 u8
*buffer
, unsigned addr
, unsigned blksz
, int write
)
247 struct mmc_request mrq
;
248 struct mmc_command cmd
;
249 struct mmc_command stop
;
250 struct mmc_data data
;
252 struct scatterlist sg
;
254 memset(&mrq
, 0, sizeof(struct mmc_request
));
255 memset(&cmd
, 0, sizeof(struct mmc_command
));
256 memset(&data
, 0, sizeof(struct mmc_data
));
257 memset(&stop
, 0, sizeof(struct mmc_command
));
/* One-entry scatterlist over the caller's buffer. */
263 sg_init_one(&sg
, buffer
, blksz
);
/* Single block of blksz bytes at 'addr'. */
265 mmc_test_prepare_mrq(test
, &mrq
, &sg
, 1, addr
, 1, blksz
, write
);
267 mmc_wait_for_req(test
->card
->host
, &mrq
);
274 ret
= mmc_test_wait_busy(test
);
/*
 * Free the pages tracked in a struct mmc_test_mem.
 * NOTE(review): the NULL check, the loop decrementing mem->cnt and the
 * kfree() of the array/struct are missing from this extraction.
 */
281 static void mmc_test_free_mem(struct mmc_test_mem
*mem
)
286 __free_pages(mem
->arr
[mem
->cnt
].page
,
287 mem
->arr
[mem
->cnt
].order
);
/*
293 * Allocate a lot of memory, preferrably max_sz but at least min_sz. In case
294 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do
295 * not exceed a maximum number of segments and try not to make segments much
296 * bigger than maximum segment size.
 */
/*
 * NOTE(review): allocation-failure paths, the 'order'/'page' declarations,
 * parts of the gfp flags, and the loop bookkeeping are missing from this
 * extraction; code left byte-identical.
 */
298 static struct mmc_test_mem
*mmc_test_alloc_mem(unsigned long min_sz
,
299 unsigned long max_sz
,
300 unsigned int max_segs
,
301 unsigned int max_seg_sz
)
303 unsigned long max_page_cnt
= DIV_ROUND_UP(max_sz
, PAGE_SIZE
);
304 unsigned long min_page_cnt
= DIV_ROUND_UP(min_sz
, PAGE_SIZE
);
305 unsigned long max_seg_page_cnt
= DIV_ROUND_UP(max_seg_sz
, PAGE_SIZE
);
306 unsigned long page_cnt
= 0;
/* Limit to 1/16th of the free lowmem pages. */
307 unsigned long limit
= nr_free_buffer_pages() >> 4;
308 struct mmc_test_mem
*mem
;
310 if (max_page_cnt
> limit
)
311 max_page_cnt
= limit
;
312 if (min_page_cnt
> max_page_cnt
)
313 min_page_cnt
= max_page_cnt
;
315 if (max_seg_page_cnt
> max_page_cnt
)
316 max_seg_page_cnt
= max_page_cnt
;
318 if (max_segs
> max_page_cnt
)
319 max_segs
= max_page_cnt
;
321 mem
= kzalloc(sizeof(struct mmc_test_mem
), GFP_KERNEL
);
325 mem
->arr
= kzalloc(sizeof(struct mmc_test_pages
) * max_segs
,
330 while (max_page_cnt
) {
333 gfp_t flags
= GFP_KERNEL
| GFP_DMA
| __GFP_NOWARN
|
336 order
= get_order(max_seg_page_cnt
<< PAGE_SHIFT
);
338 page
= alloc_pages(flags
, order
);
344 if (page_cnt
< min_page_cnt
)
348 mem
->arr
[mem
->cnt
].page
= page
;
349 mem
->arr
[mem
->cnt
].order
= order
;
351 if (max_page_cnt
<= (1UL << order
))
353 max_page_cnt
-= 1UL << order
;
354 page_cnt
+= 1UL << order
;
355 if (mem
->cnt
>= max_segs
) {
356 if (page_cnt
< min_page_cnt
)
/* Error path: release everything allocated so far. */
365 mmc_test_free_mem(mem
);
/*
370 * Map memory into a scatterlist. Optionally allow the same memory to be
371 * mapped more than once.
 */
/*
 * NOTE(review): the do-loop head, sg advancing, size bookkeeping, *sg_len
 * updates and the return are missing from this extraction.
 */
373 static int mmc_test_map_sg(struct mmc_test_mem
*mem
, unsigned long sz
,
374 struct scatterlist
*sglist
, int repeat
,
375 unsigned int max_segs
, unsigned int max_seg_sz
,
376 unsigned int *sg_len
)
378 struct scatterlist
*sg
= NULL
;
381 sg_init_table(sglist
, max_segs
);
385 for (i
= 0; i
< mem
->cnt
; i
++) {
386 unsigned long len
= PAGE_SIZE
<< mem
->arr
[i
].order
;
390 if (len
> max_seg_sz
)
398 sg_set_page(sg
, mem
->arr
[i
].page
, len
, 0);
404 } while (sz
&& repeat
);
/*
416 * Map memory into a scatterlist so that no pages are contiguous. Allow the
417 * same memory to be mapped more than once.
 */
/*
 * NOTE(review): the outer loop structure, 'len' declaration, segment-count
 * checks and the return are missing from this extraction.
 */
419 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem
*mem
,
421 struct scatterlist
*sglist
,
422 unsigned int max_segs
,
423 unsigned int max_seg_sz
,
424 unsigned int *sg_len
)
426 struct scatterlist
*sg
= NULL
;
/* Walk allocations from the last one backwards. */
427 unsigned int i
= mem
->cnt
, cnt
;
429 void *base
, *addr
, *last_addr
= NULL
;
431 sg_init_table(sglist
, max_segs
);
435 base
= page_address(mem
->arr
[--i
].page
);
436 cnt
= 1 << mem
->arr
[i
].order
;
/* Pick pages back-to-front so consecutive segments are not contiguous. */
438 addr
= base
+ PAGE_SIZE
* --cnt
;
439 if (last_addr
&& last_addr
+ PAGE_SIZE
== addr
)
443 if (len
> max_seg_sz
)
453 sg_set_page(sg
, virt_to_page(addr
), len
, 0);
/*
468 * Calculate transfer rate in bytes per second.
 */
/*
 * NOTE(review): the computation of 'ns' from *ts and the scaling loop body
 * are missing from this extraction; only the 64-bit divide survives.
 */
470 static unsigned int mmc_test_rate(uint64_t bytes
, struct timespec
*ts
)
480 while (ns
> UINT_MAX
) {
488 do_div(bytes
, (uint32_t)ns
);
/*
494 * Save transfer results for future usage
 */
/*
 * NOTE(review): the 'rate' parameter, the test->gr guard, the allocation
 * check and most member assignments are missing from this extraction.
 */
496 static void mmc_test_save_transfer_result(struct mmc_test_card
*test
,
497 unsigned int count
, unsigned int sectors
, struct timespec ts
,
500 struct mmc_test_transfer_result
*tr
;
505 tr
= kmalloc(sizeof(struct mmc_test_transfer_result
), GFP_KERNEL
);
510 tr
->sectors
= sectors
;
/* Queue the measurement on the current testcase's result list. */
514 list_add_tail(&tr
->link
, &test
->gr
->tr_lst
);
/*
518 * Print the transfer rate.
 */
/* NOTE(review): the 'struct timespec ts' declaration is missing here. */
520 static void mmc_test_print_rate(struct mmc_test_card
*test
, uint64_t bytes
,
521 struct timespec
*ts1
, struct timespec
*ts2
)
523 unsigned int rate
, sectors
= bytes
>> 9;
526 ts
= timespec_sub(*ts2
, *ts1
);
528 rate
= mmc_test_rate(bytes
, &ts
);
530 printk(KERN_INFO
"%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
531 "seconds (%u kB/s, %u KiB/s)\n",
532 mmc_hostname(test
->card
->host
), sectors
, sectors
>> 1,
533 (sectors
& 1 ? ".5" : ""), (unsigned long)ts
.tv_sec
,
534 (unsigned long)ts
.tv_nsec
, rate
/ 1000, rate
/ 1024);
/* Record a single-group measurement (count = 1). */
536 mmc_test_save_transfer_result(test
, 1, sectors
, ts
, rate
);
/*
540 * Print the average transfer rate.
 */
/* NOTE(review): the 'struct timespec ts' declaration is missing here. */
542 static void mmc_test_print_avg_rate(struct mmc_test_card
*test
, uint64_t bytes
,
543 unsigned int count
, struct timespec
*ts1
,
544 struct timespec
*ts2
)
546 unsigned int rate
, sectors
= bytes
>> 9;
/* Total bytes across all 'count' transfers. */
547 uint64_t tot
= bytes
* count
;
550 ts
= timespec_sub(*ts2
, *ts1
);
552 rate
= mmc_test_rate(tot
, &ts
);
554 printk(KERN_INFO
"%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
555 "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
556 mmc_hostname(test
->card
->host
), count
, sectors
, count
,
557 sectors
>> 1, (sectors
& 1 ? ".5" : ""),
558 (unsigned long)ts
.tv_sec
, (unsigned long)ts
.tv_nsec
,
559 rate
/ 1000, rate
/ 1024);
561 mmc_test_save_transfer_result(test
, count
, sectors
, ts
, rate
);
565 * Return the card size in sectors.
567 static unsigned int mmc_test_capacity(struct mmc_card
*card
)
569 if (!mmc_card_sd(card
) && mmc_card_blockaddr(card
))
570 return card
->ext_csd
.sectors
;
572 return card
->csd
.capacity
<< (card
->csd
.read_blkbits
- 9);
575 /*******************************************************************/
576 /* Test preparation and cleanup */
577 /*******************************************************************/
/*
580 * Fill the first couple of sectors of the card with known data
581 * so that bad reads/writes can be detected
 */
/*
 * NOTE(review): the 'ret'/'i' declarations, the error checks, the body of
 * the pattern-fill loop and the final return are missing here.
 */
583 static int __mmc_test_prepare(struct mmc_test_card
*test
, int write
)
587 ret
= mmc_test_set_blksize(test
, 512);
/* Known fill byte 0xDF (read preparation path). */
592 memset(test
->buffer
, 0xDF, 512);
594 for (i
= 0;i
< 512;i
++)
/* Write the buffer to every 512-byte sector of the test region. */
598 for (i
= 0;i
< BUFFER_SIZE
/ 512;i
++) {
599 ret
= mmc_test_buffer_transfer(test
, test
->buffer
, i
, 512, 1);
static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	/* Lay down the known pattern so subsequent writes can be verified. */
	return __mmc_test_prepare(test, 1);
}
static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	/* Lay down the known pattern so subsequent reads can be verified. */
	return __mmc_test_prepare(test, 0);
}
/*
 * Zero the first BUFFER_SIZE bytes of the card after a test run.
 * NOTE(review): declarations, error checks and the final return are
 * missing from this extraction.
 */
617 static int mmc_test_cleanup(struct mmc_test_card
*test
)
621 ret
= mmc_test_set_blksize(test
, 512);
625 memset(test
->buffer
, 0, 512);
627 for (i
= 0;i
< BUFFER_SIZE
/ 512;i
++) {
628 ret
= mmc_test_buffer_transfer(test
, test
->buffer
, i
, 512, 1);
636 /*******************************************************************/
637 /* Test execution helpers */
638 /*******************************************************************/
/*
641 * Modifies the mmc_request to perform the "short transfer" tests
 */
/*
 * NOTE(review): the else-branch structure and closing braces are missing
 * from this extraction; code left byte-identical.
 */
643 static void mmc_test_prepare_broken_mrq(struct mmc_test_card
*test
,
644 struct mmc_request
*mrq
, int write
)
646 BUG_ON(!mrq
|| !mrq
->cmd
|| !mrq
->data
);
/* Multi-block request: downgrade to a single-block opcode. */
648 if (mrq
->data
->blocks
> 1) {
649 mrq
->cmd
->opcode
= write
?
650 MMC_WRITE_BLOCK
: MMC_READ_SINGLE_BLOCK
;
/* Single-block request: replace with a non-data command (CMD13). */
653 mrq
->cmd
->opcode
= MMC_SEND_STATUS
;
654 mrq
->cmd
->arg
= test
->card
->rca
<< 16;
/*
659 * Checks that a normal transfer didn't have any errors
 */
/*
 * NOTE(review): the 'int ret' declaration/initialisation and the success
 * return are missing from this extraction.
 */
661 static int mmc_test_check_result(struct mmc_test_card
*test
,
662 struct mmc_request
*mrq
)
666 BUG_ON(!mrq
|| !mrq
->cmd
|| !mrq
->data
);
/* Report the first error seen: command, then data, then stop. */
670 if (!ret
&& mrq
->cmd
->error
)
671 ret
= mrq
->cmd
->error
;
672 if (!ret
&& mrq
->data
->error
)
673 ret
= mrq
->data
->error
;
674 if (!ret
&& mrq
->stop
&& mrq
->stop
->error
)
675 ret
= mrq
->stop
->error
;
/* A short transfer on a "normal" request means the host fell short. */
676 if (!ret
&& mrq
->data
->bytes_xfered
!=
677 mrq
->data
->blocks
* mrq
->data
->blksz
)
681 ret
= RESULT_UNSUP_HOST
;
/*
687 * Checks that a "short transfer" behaved as expected
 */
/*
 * NOTE(review): the 'ret' declaration, some conditional bodies and the
 * final return are missing from this extraction.
 */
689 static int mmc_test_check_broken_result(struct mmc_test_card
*test
,
690 struct mmc_request
*mrq
)
694 BUG_ON(!mrq
|| !mrq
->cmd
|| !mrq
->data
);
698 if (!ret
&& mrq
->cmd
->error
)
699 ret
= mrq
->cmd
->error
;
/* A broken transfer is EXPECTED to error; no data error is suspicious. */
700 if (!ret
&& mrq
->data
->error
== 0)
/* A timeout is the expected failure mode; anything else is an error. */
702 if (!ret
&& mrq
->data
->error
!= -ETIMEDOUT
)
703 ret
= mrq
->data
->error
;
704 if (!ret
&& mrq
->stop
&& mrq
->stop
->error
)
705 ret
= mrq
->stop
->error
;
706 if (mrq
->data
->blocks
> 1) {
/* At most one block should have been transferred. */
707 if (!ret
&& mrq
->data
->bytes_xfered
> mrq
->data
->blksz
)
/* Single-block case: nothing should have been transferred. */
710 if (!ret
&& mrq
->data
->bytes_xfered
> 0)
715 ret
= RESULT_UNSUP_HOST
;
/*
721 * Tests a basic transfer with certain parameters
 */
/*
 * NOTE(review): the lines wiring cmd/stop/data into mrq (original 737-739)
 * are missing from this extraction.
 */
723 static int mmc_test_simple_transfer(struct mmc_test_card
*test
,
724 struct scatterlist
*sg
, unsigned sg_len
, unsigned dev_addr
,
725 unsigned blocks
, unsigned blksz
, int write
)
727 struct mmc_request mrq
;
728 struct mmc_command cmd
;
729 struct mmc_command stop
;
730 struct mmc_data data
;
732 memset(&mrq
, 0, sizeof(struct mmc_request
));
733 memset(&cmd
, 0, sizeof(struct mmc_command
));
734 memset(&data
, 0, sizeof(struct mmc_data
));
735 memset(&stop
, 0, sizeof(struct mmc_command
));
741 mmc_test_prepare_mrq(test
, &mrq
, sg
, sg_len
, dev_addr
,
742 blocks
, blksz
, write
);
744 mmc_wait_for_req(test
->card
->host
, &mrq
);
746 mmc_test_wait_busy(test
);
748 return mmc_test_check_result(test
, &mrq
);
/*
752 * Tests a transfer where the card will fail completely or partly
 */
/*
 * NOTE(review): the lines wiring cmd/stop/data into mrq are missing from
 * this extraction.
 */
754 static int mmc_test_broken_transfer(struct mmc_test_card
*test
,
755 unsigned blocks
, unsigned blksz
, int write
)
757 struct mmc_request mrq
;
758 struct mmc_command cmd
;
759 struct mmc_command stop
;
760 struct mmc_data data
;
762 struct scatterlist sg
;
764 memset(&mrq
, 0, sizeof(struct mmc_request
));
765 memset(&cmd
, 0, sizeof(struct mmc_command
));
766 memset(&data
, 0, sizeof(struct mmc_data
));
767 memset(&stop
, 0, sizeof(struct mmc_command
));
773 sg_init_one(&sg
, test
->buffer
, blocks
* blksz
);
775 mmc_test_prepare_mrq(test
, &mrq
, &sg
, 1, 0, blocks
, blksz
, write
);
/* Deliberately break the request to provoke a short transfer. */
776 mmc_test_prepare_broken_mrq(test
, &mrq
, write
);
778 mmc_wait_for_req(test
->card
->host
, &mrq
);
780 mmc_test_wait_busy(test
);
782 return mmc_test_check_broken_result(test
, &mrq
);
/*
786 * Does a complete transfer test where data is also validated
788 * Note: mmc_test_prepare() must have been done before this call
 */
/*
 * NOTE(review): declarations ('ret', 'i', 'sectors', 'flags'), the
 * write/read branch structure, error checks and returns are missing from
 * this extraction; code left byte-identical.
 */
790 static int mmc_test_transfer(struct mmc_test_card
*test
,
791 struct scatterlist
*sg
, unsigned sg_len
, unsigned dev_addr
,
792 unsigned blocks
, unsigned blksz
, int write
)
/* Write path: fill scratch with an incrementing byte pattern. */
798 for (i
= 0;i
< blocks
* blksz
;i
++)
799 test
->scratch
[i
] = i
;
/* Read path: clear scratch before receiving. */
801 memset(test
->scratch
, 0, BUFFER_SIZE
);
/* IRQs disabled around sg copies — presumably because the sg pages may
 * be kmapped; confirm against sg_copy_* requirements. */
803 local_irq_save(flags
);
804 sg_copy_from_buffer(sg
, sg_len
, test
->scratch
, BUFFER_SIZE
);
805 local_irq_restore(flags
);
807 ret
= mmc_test_set_blksize(test
, blksz
);
811 ret
= mmc_test_simple_transfer(test
, sg
, sg_len
, dev_addr
,
812 blocks
, blksz
, write
);
819 ret
= mmc_test_set_blksize(test
, 512);
/* Round the transfer up to whole 512-byte sectors for verification. */
823 sectors
= (blocks
* blksz
+ 511) / 512;
824 if ((sectors
* 512) == (blocks
* blksz
))
827 if ((sectors
* 512) > BUFFER_SIZE
)
830 memset(test
->buffer
, 0, sectors
* 512);
/* Read the written sectors back one at a time. */
832 for (i
= 0;i
< sectors
;i
++) {
833 ret
= mmc_test_buffer_transfer(test
,
834 test
->buffer
+ i
* 512,
835 dev_addr
+ i
, 512, 0);
/* Verify the incrementing pattern... */
840 for (i
= 0;i
< blocks
* blksz
;i
++) {
841 if (test
->buffer
[i
] != (u8
)i
)
/* ...and that the tail still holds the 0xDF prepare pattern. */
845 for (;i
< sectors
* 512;i
++) {
846 if (test
->buffer
[i
] != 0xDF)
850 local_irq_save(flags
);
851 sg_copy_to_buffer(sg
, sg_len
, test
->scratch
, BUFFER_SIZE
);
852 local_irq_restore(flags
);
853 for (i
= 0;i
< blocks
* blksz
;i
++) {
854 if (test
->scratch
[i
] != (u8
)i
)
862 /*******************************************************************/
864 /*******************************************************************/
/*
 * One test case: optional prepare/cleanup hooks around a mandatory run.
 * NOTE(review): the test-case name member appears to be missing from this
 * extraction.
 */
866 struct mmc_test_case
{
869 int (*prepare
)(struct mmc_test_card
*);
870 int (*run
)(struct mmc_test_card
*);
871 int (*cleanup
)(struct mmc_test_card
*);
/*
 * Basic single-block (512 B) write at address 0, no data verification.
 * NOTE(review): 'ret' declaration, error checks and return are missing.
 */
874 static int mmc_test_basic_write(struct mmc_test_card
*test
)
877 struct scatterlist sg
;
879 ret
= mmc_test_set_blksize(test
, 512);
883 sg_init_one(&sg
, test
->buffer
, 512);
885 ret
= mmc_test_simple_transfer(test
, &sg
, 1, 0, 1, 512, 1);
/*
 * Basic single-block (512 B) read at address 0, no data verification.
 * NOTE(review): 'ret' declaration, error checks and return are missing.
 */
892 static int mmc_test_basic_read(struct mmc_test_card
*test
)
895 struct scatterlist sg
;
897 ret
= mmc_test_set_blksize(test
, 512);
901 sg_init_one(&sg
, test
->buffer
, 512);
903 ret
= mmc_test_simple_transfer(test
, &sg
, 1, 0, 1, 512, 0);
/*
 * Single-block write with read-back data verification.
 * NOTE(review): 'ret' declaration, error checks and return are missing.
 */
910 static int mmc_test_verify_write(struct mmc_test_card
*test
)
913 struct scatterlist sg
;
915 sg_init_one(&sg
, test
->buffer
, 512);
917 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, 512, 1);
/*
 * Single-block read with data verification.
 * NOTE(review): 'ret' declaration, error checks and return are missing.
 */
924 static int mmc_test_verify_read(struct mmc_test_card
*test
)
927 struct scatterlist sg
;
929 sg_init_one(&sg
, test
->buffer
, 512);
931 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, 512, 0);
/*
 * Multi-block write (up to two pages) with verification.
 * NOTE(review): the 'size' declaration, the size-too-small check and the
 * error-check/return lines are missing from this extraction.
 */
938 static int mmc_test_multi_write(struct mmc_test_card
*test
)
942 struct scatterlist sg
;
944 if (test
->card
->host
->max_blk_count
== 1)
945 return RESULT_UNSUP_HOST
;
/* Clamp the transfer to what the host controller can do. */
947 size
= PAGE_SIZE
* 2;
948 size
= min(size
, test
->card
->host
->max_req_size
);
949 size
= min(size
, test
->card
->host
->max_seg_size
);
950 size
= min(size
, test
->card
->host
->max_blk_count
* 512);
953 return RESULT_UNSUP_HOST
;
955 sg_init_one(&sg
, test
->buffer
, size
);
957 ret
= mmc_test_transfer(test
, &sg
, 1, 0, size
/512, 512, 1);
/*
 * Multi-block read (up to two pages) with verification.
 * NOTE(review): the 'size' declaration, the size-too-small check and the
 * error-check/return lines are missing from this extraction.
 */
964 static int mmc_test_multi_read(struct mmc_test_card
*test
)
968 struct scatterlist sg
;
970 if (test
->card
->host
->max_blk_count
== 1)
971 return RESULT_UNSUP_HOST
;
973 size
= PAGE_SIZE
* 2;
974 size
= min(size
, test
->card
->host
->max_req_size
);
975 size
= min(size
, test
->card
->host
->max_seg_size
);
976 size
= min(size
, test
->card
->host
->max_blk_count
* 512);
979 return RESULT_UNSUP_HOST
;
981 sg_init_one(&sg
, test
->buffer
, size
);
983 ret
= mmc_test_transfer(test
, &sg
, 1, 0, size
/512, 512, 0);
/*
 * Partial-block writes at power-of-two sizes 1..256 bytes.
 * NOTE(review): 'ret'/'i' declarations, per-iteration error checks and
 * the return are missing from this extraction.
 */
990 static int mmc_test_pow2_write(struct mmc_test_card
*test
)
993 struct scatterlist sg
;
995 if (!test
->card
->csd
.write_partial
)
996 return RESULT_UNSUP_CARD
;
998 for (i
= 1; i
< 512;i
<<= 1) {
999 sg_init_one(&sg
, test
->buffer
, i
);
1000 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, i
, 1);
/*
 * Partial-block reads at power-of-two sizes 1..256 bytes.
 * NOTE(review): declarations, error checks and return are missing.
 */
1008 static int mmc_test_pow2_read(struct mmc_test_card
*test
)
1011 struct scatterlist sg
;
1013 if (!test
->card
->csd
.read_partial
)
1014 return RESULT_UNSUP_CARD
;
1016 for (i
= 1; i
< 512;i
<<= 1) {
1017 sg_init_one(&sg
, test
->buffer
, i
)
1018 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, i
, 0);
/*
 * Partial-block writes at "weird" sizes (3, 10, 17, ... bytes).
 * NOTE(review): declarations, error checks and return are missing.
 */
1026 static int mmc_test_weird_write(struct mmc_test_card
*test
)
1029 struct scatterlist sg
;
1031 if (!test
->card
->csd
.write_partial
)
1032 return RESULT_UNSUP_CARD
;
1034 for (i
= 3; i
< 512;i
+= 7) {
1035 sg_init_one(&sg
, test
->buffer
, i
);
1036 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, i
, 1);
/*
 * Partial-block reads at "weird" sizes (3, 10, 17, ... bytes).
 * NOTE(review): declarations, error checks and return are missing.
 */
1044 static int mmc_test_weird_read(struct mmc_test_card
*test
)
1047 struct scatterlist sg
;
1049 if (!test
->card
->csd
.read_partial
)
1050 return RESULT_UNSUP_CARD
;
1052 for (i
= 3; i
< 512;i
+= 7) {
1053 sg_init_one(&sg
, test
->buffer
, i
);
1054 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, i
, 0);
/*
 * Single-block writes from buffers misaligned by 1..3 bytes.
 * NOTE(review): declarations, error checks and return are missing.
 */
1062 static int mmc_test_align_write(struct mmc_test_card
*test
)
1065 struct scatterlist sg
;
1067 for (i
= 1;i
< 4;i
++) {
1068 sg_init_one(&sg
, test
->buffer
+ i
, 512);
1069 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, 512, 1);
/*
 * Single-block reads into buffers misaligned by 1..3 bytes.
 * NOTE(review): declarations, error checks and return are missing.
 */
1077 static int mmc_test_align_read(struct mmc_test_card
*test
)
1080 struct scatterlist sg
;
1082 for (i
= 1;i
< 4;i
++) {
1083 sg_init_one(&sg
, test
->buffer
+ i
, 512);
1084 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, 512, 0);
/*
 * Multi-block writes from buffers misaligned by 1..3 bytes.
 * NOTE(review): 'size' declaration, size-too-small check, error checks
 * and return are missing from this extraction.
 */
1092 static int mmc_test_align_multi_write(struct mmc_test_card
*test
)
1096 struct scatterlist sg
;
1098 if (test
->card
->host
->max_blk_count
== 1)
1099 return RESULT_UNSUP_HOST
;
1101 size
= PAGE_SIZE
* 2;
1102 size
= min(size
, test
->card
->host
->max_req_size
);
1103 size
= min(size
, test
->card
->host
->max_seg_size
);
1104 size
= min(size
, test
->card
->host
->max_blk_count
* 512);
1107 return RESULT_UNSUP_HOST
;
1109 for (i
= 1;i
< 4;i
++) {
1110 sg_init_one(&sg
, test
->buffer
+ i
, size
);
1111 ret
= mmc_test_transfer(test
, &sg
, 1, 0, size
/512, 512, 1);
/*
 * Multi-block reads into buffers misaligned by 1..3 bytes.
 * NOTE(review): 'size' declaration, size-too-small check, error checks
 * and return are missing from this extraction.
 */
1119 static int mmc_test_align_multi_read(struct mmc_test_card
*test
)
1123 struct scatterlist sg
;
1125 if (test
->card
->host
->max_blk_count
== 1)
1126 return RESULT_UNSUP_HOST
;
1128 size
= PAGE_SIZE
* 2;
1129 size
= min(size
, test
->card
->host
->max_req_size
);
1130 size
= min(size
, test
->card
->host
->max_seg_size
);
1131 size
= min(size
, test
->card
->host
->max_blk_count
* 512);
1134 return RESULT_UNSUP_HOST
;
1136 for (i
= 1;i
< 4;i
++) {
1137 sg_init_one(&sg
, test
->buffer
+ i
, size
);
1138 ret
= mmc_test_transfer(test
, &sg
, 1, 0, size
/512, 512, 0);
/*
 * Deliberately-broken single-block write (short-transfer handling).
 * NOTE(review): 'ret' declaration, error checks and return are missing.
 */
1146 static int mmc_test_xfersize_write(struct mmc_test_card
*test
)
1150 ret
= mmc_test_set_blksize(test
, 512);
1154 ret
= mmc_test_broken_transfer(test
, 1, 512, 1);
/*
 * Deliberately-broken single-block read (short-transfer handling).
 * NOTE(review): 'ret' declaration, error checks and return are missing.
 */
1161 static int mmc_test_xfersize_read(struct mmc_test_card
*test
)
1165 ret
= mmc_test_set_blksize(test
, 512);
1169 ret
= mmc_test_broken_transfer(test
, 1, 512, 0);
/*
 * Deliberately-broken two-block write (short-transfer handling).
 * NOTE(review): 'ret' declaration, error checks and return are missing.
 */
1176 static int mmc_test_multi_xfersize_write(struct mmc_test_card
*test
)
1180 if (test
->card
->host
->max_blk_count
== 1)
1181 return RESULT_UNSUP_HOST
;
1183 ret
= mmc_test_set_blksize(test
, 512);
1187 ret
= mmc_test_broken_transfer(test
, 2, 512, 1);
/*
 * Deliberately-broken two-block read (short-transfer handling).
 * NOTE(review): 'ret' declaration, error checks and return are missing.
 */
1194 static int mmc_test_multi_xfersize_read(struct mmc_test_card
*test
)
1198 if (test
->card
->host
->max_blk_count
== 1)
1199 return RESULT_UNSUP_HOST
;
1201 ret
= mmc_test_set_blksize(test
, 512);
1205 ret
= mmc_test_broken_transfer(test
, 2, 512, 0);
1212 #ifdef CONFIG_HIGHMEM
/*
 * Single-block write from the highmem test page.
 * NOTE(review): 'ret' declaration, error checks and return are missing.
 */
1214 static int mmc_test_write_high(struct mmc_test_card
*test
)
1217 struct scatterlist sg
;
1219 sg_init_table(&sg
, 1);
1220 sg_set_page(&sg
, test
->highmem
, 512, 0);
1222 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, 512, 1);
/*
 * Single-block read into the highmem test page.
 * NOTE(review): 'ret' declaration, error checks and return are missing.
 */
1229 static int mmc_test_read_high(struct mmc_test_card
*test
)
1232 struct scatterlist sg
;
1234 sg_init_table(&sg
, 1);
1235 sg_set_page(&sg
, test
->highmem
, 512, 0);
1237 ret
= mmc_test_transfer(test
, &sg
, 1, 0, 1, 512, 0);
/*
 * Multi-block write from the highmem test page.
 * NOTE(review): 'size' declaration, size-too-small check, error checks
 * and return are missing from this extraction.
 */
1244 static int mmc_test_multi_write_high(struct mmc_test_card
*test
)
1248 struct scatterlist sg
;
1250 if (test
->card
->host
->max_blk_count
== 1)
1251 return RESULT_UNSUP_HOST
;
1253 size
= PAGE_SIZE
* 2;
1254 size
= min(size
, test
->card
->host
->max_req_size
);
1255 size
= min(size
, test
->card
->host
->max_seg_size
);
1256 size
= min(size
, test
->card
->host
->max_blk_count
* 512);
1259 return RESULT_UNSUP_HOST
;
1261 sg_init_table(&sg
, 1);
1262 sg_set_page(&sg
, test
->highmem
, size
, 0);
1264 ret
= mmc_test_transfer(test
, &sg
, 1, 0, size
/512, 512, 1);
/*
 * Multi-block read into the highmem test page.
 * NOTE(review): 'size' declaration, size-too-small check, error checks
 * and return are missing from this extraction.
 */
1271 static int mmc_test_multi_read_high(struct mmc_test_card
*test
)
1275 struct scatterlist sg
;
1277 if (test
->card
->host
->max_blk_count
== 1)
1278 return RESULT_UNSUP_HOST
;
1280 size
= PAGE_SIZE
* 2;
1281 size
= min(size
, test
->card
->host
->max_req_size
);
1282 size
= min(size
, test
->card
->host
->max_seg_size
);
1283 size
= min(size
, test
->card
->host
->max_blk_count
* 512);
1286 return RESULT_UNSUP_HOST
;
1288 sg_init_table(&sg
, 1);
1289 sg_set_page(&sg
, test
->highmem
, size
, 0);
1291 ret
= mmc_test_transfer(test
, &sg
, 1, 0, size
/512, 512, 0);
/*
 * Stub used when CONFIG_HIGHMEM is not set: log and skip.
 * NOTE(review): the return statement is missing from this extraction.
 */
1300 static int mmc_test_no_highmem(struct mmc_test_card
*test
)
1302 printk(KERN_INFO
"%s: Highmem not configured - test skipped\n",
1303 mmc_hostname(test
->card
->host
));
1307 #endif /* CONFIG_HIGHMEM */
/*
1310 * Map sz bytes so that it can be transferred.
 */
/*
 * NOTE(review): the 'max_scatter' parameter line, the if/else selecting
 * between the two mapping helpers, and the return are missing from this
 * extraction.
 */
1312 static int mmc_test_area_map(struct mmc_test_card
*test
, unsigned long sz
,
1315 struct mmc_test_area
*t
= &test
->area
;
/* Number of 512-byte blocks this mapping covers. */
1318 t
->blocks
= sz
>> 9;
1321 err
= mmc_test_map_sg_max_scatter(t
->mem
, sz
, t
->sg
,
1322 t
->max_segs
, t
->max_seg_sz
,
1325 err
= mmc_test_map_sg(t
->mem
, sz
, t
->sg
, 1, t
->max_segs
,
1326 t
->max_seg_sz
, &t
->sg_len
);
1329 printk(KERN_INFO
"%s: Failed to map sg list\n",
1330 mmc_hostname(test
->card
->host
));
1335 * Transfer bytes mapped by mmc_test_area_map().
1337 static int mmc_test_area_transfer(struct mmc_test_card
*test
,
1338 unsigned int dev_addr
, int write
)
1340 struct mmc_test_area
*t
= &test
->area
;
1342 return mmc_test_simple_transfer(test
, t
->sg
, t
->sg_len
, dev_addr
,
1343 t
->blocks
, 512, write
);
/*
1347 * Map and transfer bytes.
 */
/*
 * NOTE(review): the timing-flag parameter, 'ret' declaration, the
 * max_scatter conditional structure, error checks and the return are
 * missing from this extraction.
 */
1349 static int mmc_test_area_io(struct mmc_test_card
*test
, unsigned long sz
,
1350 unsigned int dev_addr
, int write
, int max_scatter
,
1353 struct timespec ts1
, ts2
;
/*
1357 * In the case of a maximally scattered transfer, the maximum transfer
1358 * size is further limited by using PAGE_SIZE segments.
 */
1361 struct mmc_test_area
*t
= &test
->area
;
1362 unsigned long max_tfr
;
1364 if (t
->max_seg_sz
>= PAGE_SIZE
)
1365 max_tfr
= t
->max_segs
* PAGE_SIZE
;
1367 max_tfr
= t
->max_segs
* t
->max_seg_sz
;
1372 ret
= mmc_test_area_map(test
, sz
, max_scatter
);
1377 getnstimeofday(&ts1
);
1379 ret
= mmc_test_area_transfer(test
, dev_addr
, write
);
1384 getnstimeofday(&ts2
);
1387 mmc_test_print_rate(test
, sz
, &ts1
, &ts2
);
/*
1393 * Write the test area entirely.
 */
/* NOTE(review): the trailing argument list of the call is missing here. */
1395 static int mmc_test_area_fill(struct mmc_test_card
*test
)
1397 return mmc_test_area_io(test
, test
->area
.max_tfr
, test
->area
.dev_addr
,
/*
1402 * Erase the test area entirely.
 */
/*
 * NOTE(review): the early-return value when erase is unsupported and the
 * final erase-argument are missing from this extraction.
 */
1404 static int mmc_test_area_erase(struct mmc_test_card
*test
)
1406 struct mmc_test_area
*t
= &test
->area
;
1408 if (!mmc_can_erase(test
->card
))
/* Erase the whole area, expressed in 512-byte sectors. */
1411 return mmc_erase(test
->card
, t
->dev_addr
, test
->area
.max_sz
>> 9,
/*
1416 * Cleanup struct mmc_test_area.
 */
/*
 * NOTE(review): freeing of t->sg and the return are missing from this
 * extraction.
 */
1418 static int mmc_test_area_cleanup(struct mmc_test_card
*test
)
1420 struct mmc_test_area
*t
= &test
->area
;
1423 mmc_test_free_mem(t
->mem
);
/*
1429 * Initialize an area for testing large transfers. The size of the area is the
1430 * preferred erase size which is a good size for optimal transfer speed. Note
1431 * that is typically 4MiB for modern cards. The test area is set to the middle
1432 * of the card because cards may have different charateristics at the front
1433 * (for FAT file system optimization). Optionally, the area is erased (if the
1434 * card supports it) which may improve write performance. Optionally, the area
1435 * is filled with data for subsequent read tests.
 */
/*
 * NOTE(review): 'ret' declaration, allocation-failure handling, the
 * erase/fill conditionals and the out/error paths are missing from this
 * extraction; code left byte-identical.
 */
1437 static int mmc_test_area_init(struct mmc_test_card
*test
, int erase
, int fill
)
1439 struct mmc_test_area
*t
= &test
->area
;
1440 unsigned long min_sz
= 64 * 1024;
1443 ret
= mmc_test_set_blksize(test
, 512);
/* Cap the area at TEST_AREA_MAX_SIZE; pref_erase is in sectors. */
1447 if (test
->card
->pref_erase
> TEST_AREA_MAX_SIZE
>> 9)
1448 t
->max_sz
= TEST_AREA_MAX_SIZE
;
1450 t
->max_sz
= (unsigned long)test
->card
->pref_erase
<< 9;
1452 t
->max_segs
= test
->card
->host
->max_segs
;
1453 t
->max_seg_sz
= test
->card
->host
->max_seg_size
;
/* Clamp max transfer by host block count, request size and segments. */
1455 t
->max_tfr
= t
->max_sz
;
1456 if (t
->max_tfr
>> 9 > test
->card
->host
->max_blk_count
)
1457 t
->max_tfr
= test
->card
->host
->max_blk_count
<< 9;
1458 if (t
->max_tfr
> test
->card
->host
->max_req_size
)
1459 t
->max_tfr
= test
->card
->host
->max_req_size
;
1460 if (t
->max_tfr
/ t
->max_seg_sz
> t
->max_segs
)
1461 t
->max_tfr
= t
->max_segs
* t
->max_seg_sz
;
/*
1464 * Try to allocate enough memory for a max. sized transfer. Less is OK
1465 * because the same memory can be mapped into the scatterlist more than
1466 * once. Also, take into account the limits imposed on scatterlist
1467 * segments by the host driver.
 */
1469 t
->mem
= mmc_test_alloc_mem(min_sz
, t
->max_tfr
, t
->max_segs
,
1474 t
->sg
= kmalloc(sizeof(struct scatterlist
) * t
->max_segs
, GFP_KERNEL
);
/* Place the area mid-card, aligned to a whole number of areas. */
1480 t
->dev_addr
= mmc_test_capacity(test
->card
) / 2;
1481 t
->dev_addr
-= t
->dev_addr
% (t
->max_sz
>> 9);
1484 ret
= mmc_test_area_erase(test
);
1490 ret
= mmc_test_area_fill(test
);
/* Error path: undo the allocations above. */
1498 mmc_test_area_cleanup(test
);
/*
 * Prepare for large transfers. Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}
/*
 * Prepare for large transfers. Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}
/*
 * Prepare for large transfers. Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}
/*
1527 * Test best-case performance. Best-case performance is expected from
1528 * a single large transfer.
1530 * An additional option (max_scatter) allows the measurement of the same
1531 * transfer but with no contiguous pages in the scatter list. This tests
1532 * the efficiency of DMA to handle scattered pages.
 */
/*
 * NOTE(review): the 'max_scatter' parameter line is missing from this
 * extraction, though the call below references it.
 */
1534 static int mmc_test_best_performance(struct mmc_test_card
*test
, int write
,
1537 return mmc_test_area_io(test
, test
->area
.max_tfr
, test
->area
.dev_addr
,
1538 write
, max_scatter
, 1);
/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}
/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}
/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}
/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}
/*
1574 * Single read performance by transfer size.
 */
/*
 * NOTE(review): 'ret' and 'sz' declarations and the per-iteration error
 * check are missing from this extraction.
 */
1576 static int mmc_test_profile_read_perf(struct mmc_test_card
*test
)
1579 unsigned int dev_addr
;
/* Double the transfer size each iteration, 512 B up to max_tfr. */
1582 for (sz
= 512; sz
< test
->area
.max_tfr
; sz
<<= 1) {
1583 dev_addr
= test
->area
.dev_addr
+ (sz
>> 9);
1584 ret
= mmc_test_area_io(test
, sz
, dev_addr
, 0, 0, 1);
/* Final full-size transfer at the start of the area. */
1588 sz
= test
->area
.max_tfr
;
1589 dev_addr
= test
->area
.dev_addr
;
1590 return mmc_test_area_io(test
, sz
, dev_addr
, 0, 0, 1);
/*
1594 * Single write performance by transfer size.
 */
/*
 * NOTE(review): declarations and per-step error checks are missing from
 * this extraction.
 */
1596 static int mmc_test_profile_write_perf(struct mmc_test_card
*test
)
1599 unsigned int dev_addr
;
/* Erase first so writes measure raw program performance. */
1602 ret
= mmc_test_area_erase(test
);
1605 for (sz
= 512; sz
< test
->area
.max_tfr
; sz
<<= 1) {
1606 dev_addr
= test
->area
.dev_addr
+ (sz
>> 9);
1607 ret
= mmc_test_area_io(test
, sz
, dev_addr
, 1, 0, 1);
1611 ret
= mmc_test_area_erase(test
);
1614 sz
= test
->area
.max_tfr
;
1615 dev_addr
= test
->area
.dev_addr
;
1616 return mmc_test_area_io(test
, sz
, dev_addr
, 1, 0, 1);
/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	/* Trim requires card support; erase requires host support. */
	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	/* Time one trim at each power-of-two size below the area size. */
	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	/*
	 * One final trim at the area base.  Note: sz here is the loop's
	 * exit value (first power of two >= max_sz), not max_sz itself.
	 */
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}
/*
 * Time consecutive reads of size @sz that together cover the whole test
 * area, and report the average rate.
 */
static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	/* Number of back-to-back transfers that fit in the test area. */
	cnt = test->area.max_sz / sz;
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);	/* advance by sz in sectors */
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}
/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	int ret;

	/* Power-of-two sizes below the maximum, then the maximum itself. */
	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = test->area.max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}
/*
 * Time consecutive writes of size @sz that together cover the whole test
 * area, and report the average rate.
 */
static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	/* Start from a freshly erased test area. */
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	/* Number of back-to-back transfers that fit in the test area. */
	cnt = test->area.max_sz / sz;
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);	/* advance by sz in sectors */
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}
/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	int ret;

	/* Power-of-two sizes below the maximum, then the maximum itself. */
	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = test->area.max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}
/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	/* Trim requires card support; erase requires host support. */
	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	/*
	 * For each power-of-two size up to and including the area size:
	 * erase and refill the area (trim needs written data to discard),
	 * then time back-to-back trims covering the whole area.
	 */
	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = test->area.max_sz / sz;
		dev_addr = test->area.dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);	/* advance by sz in sectors */
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}
/*
 * Table of all test cases.  A test case is selected from user space by
 * its 1-based index in this array (see mmc_test_run()).  @prepare and
 * @cleanup are optional; @run is mandatory.
 */
static const struct mmc_test_case mmc_test_cases[] = {
	/* Basic single- and multi-block transfers. */
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	/* Unusual transfer sizes and alignments. */
	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	/* Error-path behaviour: transfer size reporting on failure. */
	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	/* Transfers to/from high memory pages. */
	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	/* Placeholders so test numbering matches the CONFIG_HIGHMEM build. */
	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	/* Performance tests using the dedicated test area. */
	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

};
/* Serializes test runs and access to the global result list below. */
static DEFINE_MUTEX(mmc_test_lock);

/* Global list of mmc_test_general_result containers, one per test run. */
static LIST_HEAD(mmc_test_result);
/*
 * Run the selected test case against the card, or all of them when
 * @testcase is 0.  @testcase is the 1-based index into mmc_test_cases[].
 * Results are logged via printk and stored on the global result list.
 * Runs with the host claimed for the whole duration.
 */
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	printk(KERN_INFO "%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
		struct mmc_test_general_result *gr;

		/* 0 means "run everything"; otherwise run only the match. */
		if (testcase && ((i + 1) != testcase))
			continue;

		printk(KERN_INFO "%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				printk(KERN_INFO "%s: Result: Prepare "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		/*
		 * Allocation failure here is tolerated: the test still runs,
		 * only the result container is skipped.
		 */
		gr = kzalloc(sizeof(struct mmc_test_general_result),
			GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Assign data what we know already */
			gr->card = test->card;
			gr->testcase = i;

			/* Append container to global one */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save the pointer to created container in our private
			 * structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			printk(KERN_INFO "%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			printk(KERN_INFO "%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			printk(KERN_INFO "%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				printk(KERN_INFO "%s: Warning: Cleanup "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	printk(KERN_INFO "%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}
/*
 * Free saved test results.  If @card is non-NULL, only results belonging
 * to that card are freed; a NULL @card frees everything (used at module
 * exit).  Each general result owns a list of transfer results, freed
 * first.  Holds mmc_test_lock for the duration.
 */
static void mmc_test_free_result(struct mmc_card *card)
{
	struct mmc_test_general_result *gr, *grs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr, *trs;

		if (card && gr->card != card)
			continue;

		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
			list_del(&tr->link);
			kfree(tr);
		}

		list_del(&gr->link);
		kfree(gr);
	}

	mutex_unlock(&mmc_test_lock);
}
/* Global list of debugfs "test" files created per card (protected by
 * mmc_test_lock). */
static LIST_HEAD(mmc_test_file_test);
/*
 * seq_file show callback for the debugfs "test" file: dump the saved
 * results for this card.  One "Test N: result" line per test case, then
 * one line per transfer result (count, sectors, timestamp, rate).
 */
static int mtf_test_show(struct seq_file *sf, void *data)
{
	/* sf->private carries the card, set via single_open(). */
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		/* testcase is stored 0-based; report it 1-based. */
		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %lu.%09lu %u\n",
				tr->count, tr->sectors,
				(unsigned long)tr->ts.tv_sec,
				(unsigned long)tr->ts.tv_nsec,
				tr->rate);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}
/*
 * debugfs open callback: hook the seq_file machinery up with the card
 * pointer stashed in the inode when the file was created.
 */
static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}
/*
 * debugfs write callback: parse a test-case number from user space and
 * run it.  Previous results for the card are discarded first, so the
 * file always shows only the most recent run.  Returns @count on
 * success, or a negative errno.
 */
static ssize_t mtf_test_write(struct file *file, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct seq_file *sf = (struct seq_file *)file->private_data;
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_card *test;
	char lbuf[12];
	long testcase;

	/* Reject input too long to NUL-terminate in lbuf. */
	if (count >= sizeof(lbuf))
		return -EINVAL;

	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;
	lbuf[count] = '\0';

	if (strict_strtol(lbuf, 10, &testcase))
		return -EINVAL;

	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	/*
	 * Remove all test cases associated with given card. Thus we have only
	 * actual data of the last run.
	 */
	mmc_test_free_result(card);

	test->card = card;

	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

	/* Only run if all the scratch buffers were allocated. */
#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}
/* File operations for the per-card debugfs "test" file: read results
 * via seq_file, write a test number to run it. */
static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/*
 * Remove debugfs "test" files.  If @card is non-NULL, only that card's
 * file is removed; a NULL @card removes all of them (module exit).
 * Holds mmc_test_lock for the duration.
 */
static void mmc_test_free_file_test(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}

	mutex_unlock(&mmc_test_lock);
}
/*
 * Create the debugfs "test" file for @card under the card's debugfs
 * directory and record it on mmc_test_file_test so it can be removed
 * later.  Returns 0 on success or a negative errno.
 */
static int mmc_test_register_file_test(struct mmc_card *card)
{
	int ret = 0;
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;

	mutex_lock(&mmc_test_lock);

	if (card->debugfs_root)
		file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
			card->debugfs_root, card, &mmc_test_fops_test);

	if (IS_ERR_OR_NULL(file)) {
		dev_err(&card->dev,
			"Can't create file. Perhaps debugfs is disabled.\n");
		ret = -ENODEV;
		goto err;
	}

	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
	if (!df) {
		/* Undo the debugfs file created above. */
		debugfs_remove(file);
		dev_err(&card->dev,
			"Can't allocate memory for internal usage.\n");
		ret = -ENOMEM;
		goto err;
	}

	df->card = card;
	df->file = file;

	list_add(&df->link, &mmc_test_file_test);

err:
	mutex_unlock(&mmc_test_lock);

	return ret;
}
/*
 * Driver probe: claim MMC and SD cards for testing by creating their
 * debugfs control file.  Other card types are rejected with -ENODEV.
 */
static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
		return -ENODEV;

	ret = mmc_test_register_file_test(card);
	if (ret)
		return ret;

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}
/*
 * Driver remove: drop the card's saved results and its debugfs file.
 */
static void mmc_test_remove(struct mmc_card *card)
{
	mmc_test_free_result(card);
	mmc_test_free_file_test(card);
}
/* MMC bus driver registration for the test driver. */
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
};
/* Module init: register with the MMC bus. */
static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}
/* Module exit: free any remaining state, then unregister the driver. */
static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_file_test(NULL);

	mmc_unregister_driver(&mmc_driver);
}
module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");