/*
 *  linux/drivers/mmc/card/mmc_test.c
 *
 *  Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */

#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

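/*
 * A note on sizing: with the common 4 KiB page size, BUFFER_ORDER 2 makes
 * BUFFER_SIZE a 16 KiB buffer, i.e. 32 sectors of 512 bytes.
 */
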
/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)

/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_segs: maximum segments in scatterlist @sg
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_segs;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};

/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area	area;
};

/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	struct mmc_command cmd;
	int ret;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = size;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
	if (ret)
		return ret;

	return 0;
}

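/*
 * For reference: MMC_SET_BLOCKLEN is CMD16 in the MMC/SD specifications.
 * It takes the new block length in bytes as its argument and returns an
 * R1 response, which is why the command above is flagged MMC_RSP_R1.
 */
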
/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, test->card);
}

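/*
 * Note that multi-block requests are closed with MMC_STOP_TRANSMISSION
 * (CMD12) via mrq->stop, while single-block requests are self-terminating
 * and have their stop command cleared above.
 */
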
static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == 7);
}

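/*
 * R1_CURRENT_STATE() extracts the card state field from the R1 status
 * word; state 7 is the programming ("prg") state, meaning the card is
 * still busy committing data to flash.
 */
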
/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd;

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			printk(KERN_INFO "%s: Warning: Host did not "
				"wait for busy state to end.\n",
				mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}

/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	int ret;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	struct scatterlist sg;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	ret = mmc_test_wait_busy(test);
	if (ret)
		return ret;

	return 0;
}

static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}

/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 * there isn't much memory do not exceed 1/16th total lowmem pages.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (max_page_cnt < min_page_cnt)
		max_page_cnt = min_page_cnt;

	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_page_cnt,
			   GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		order = get_order(max_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}

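/*
 * The loop above is a highest-order-first allocator: it requests the
 * largest contiguous chunk that would still be useful, drops to smaller
 * orders when an allocation fails, and only gives up if the total would
 * fall below min_sz.
 */
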
/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (sz < len)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}

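/*
 * Example of the 'repeat' option: an 8 MiB transfer backed by only 1 MiB
 * of allocated memory can still be mapped, because the do/while loop
 * walks mem->arr again and maps the same pages repeatedly until sz is
 * exhausted.
 */
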
/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (sz < len)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}

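/*
 * Walking each allocation backwards one page at a time, and skipping any
 * page that directly follows the previous one, guarantees that no two
 * scatterlist entries end up physically contiguous.  This is the worst
 * case for host DMA and so exercises scatter/gather efficiency.
 */
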
/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns;

	ns = timespec_to_ns(ts);
	bytes *= 1000000000;

	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}

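/*
 * Worked example: 4 MiB transferred in 0.25 s gives
 * 4194304 * 10^9 / 250000000 = 16777216 bytes/s (16 MiB/s).  The halving
 * loop above only costs precision for transfers longer than UINT_MAX
 * nanoseconds, i.e. roughly 4.3 seconds.
 */
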
/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, sectors = bytes >> 9;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);

	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
			 "seconds (%u kB/s, %u KiB/s)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
}

/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec *ts1,
				    struct timespec *ts2)
{
	unsigned int rate, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);

	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
			 "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
			 mmc_hostname(test->card->host), count, sectors, count,
			 sectors >> 1, (sectors & 1 ? ".5" : ""),
			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
			 rate / 1000, rate / 1024);
}

/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}

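/*
 * Block-addressed (high capacity) MMC cards report their size in the
 * EXT_CSD sector count; all other cards derive it from the CSD, here
 * rescaled from the CSD read block size to 512-byte sectors.
 */
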
/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
	struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

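/*
 * -EINVAL from a host driver conventionally means the request parameters
 * were not supported, so it is translated to RESULT_UNSUP_HOST rather
 * than being reported as a test failure.
 */
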
/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
	struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
		blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}

/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	struct scatterlist sg;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}

/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}

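/*
 * The local_irq_save() around the sg_copy_{from,to}_buffer() calls above
 * is needed because those helpers may use atomic kmaps when the
 * scatterlist contains highmem pages, and such mappings must not be
 * interrupted mid-copy on kernels of this vintage.
 */
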
/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};

static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_verify_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_write(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_read(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
	       mmc_hostname(test->card->host));

	return 0;
}

#endif /* CONFIG_HIGHMEM */

/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	t->blocks = sz >> 9;

	if (max_scatter) {
		return mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						   t->max_segs, &t->sg_len);
	} else {
		return mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				       &t->sg_len);
	}
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}

/*
 * Map and transfer bytes.
 */
static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	struct timespec ts1, ts2;
	int ret;

	ret = mmc_test_area_map(test, sz, max_scatter);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts1);

	ret = mmc_test_area_transfer(test, dev_addr, write);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts2);

	if (timed)
		mmc_test_print_rate(test, sz, &ts1, &ts2);

	return 0;
}

/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
				1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
			 MMC_ERASE_ARG);
}

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	mmc_test_free_mem(t->mem);

	return 0;
}

/*
 * Initialize an area for testing large transfers.  The size of the area is
 * the preferred erase size, which is a good size for optimal transfer speed.
 * Note that it is typically 4MiB for modern cards.  The test area is set to
 * the middle of the card because cards may have different characteristics at
 * the front (for FAT file system optimization).  Optionally, the area is
 * erased (if the card supports it) which may improve write performance.
 * Optionally, the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
		t->max_sz = TEST_AREA_MAX_SIZE;
	else
		t->max_sz = (unsigned long)test->card->pref_erase << 9;
	/*
	 * Try to allocate enough memory for the whole area.  Less is OK
	 * because the same memory can be mapped into the scatterlist more
	 * than once.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_sz);
	if (!t->mem)
		return -ENOMEM;

	t->max_segs = DIV_ROUND_UP(t->max_sz, PAGE_SIZE);
	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}

/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}

/*
 * Test best-case performance.  Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list.  This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
				write, max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}

/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = test->area.max_sz;
	dev_addr = test->area.dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = test->area.max_sz;
	dev_addr = test->area.dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
		cnt = test->area.max_sz / sz;
		dev_addr = test->area.dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		cnt = test->area.max_sz / sz;
		dev_addr = test->area.dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = test->area.max_sz / sz;
		dev_addr = test->area.dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},
};

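/*
 * The table above is driven from sysfs: writing a number N to the card's
 * "test" attribute (handled by mmc_test_store() below) runs test case N
 * only, while writing 0 runs every case in order.
 */
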
static DEFINE_MUTEX(mmc_test_lock);

static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	printk(KERN_INFO "%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		if (testcase && ((i + 1) != testcase))
			continue;

		printk(KERN_INFO "%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				printk(KERN_INFO "%s: Result: Prepare "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			printk(KERN_INFO "%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			printk(KERN_INFO "%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			printk(KERN_INFO "%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				printk(KERN_INFO "%s: Warning: Cleanup "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	printk(KERN_INFO "%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}

static ssize_t mmc_test_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	mutex_lock(&mmc_test_lock);
	mutex_unlock(&mmc_test_lock);

	return 0;
}

static ssize_t mmc_test_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_card *card;
	struct mmc_test_card *test;
	int testcase;

	card = container_of(dev, struct mmc_card, dev);

	testcase = simple_strtol(buf, NULL, 10);

	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->card = card;

	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}

static DEVICE_ATTR(test, S_IWUSR | S_IRUGO, mmc_test_show, mmc_test_store);

static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD))
		return -ENODEV;

	ret = device_create_file(&card->dev, &dev_attr_test);
	if (ret)
		return ret;

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}

static void mmc_test_remove(struct mmc_card *card)
{
	device_remove_file(&card->dev, &dev_attr_test);
}

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
};

static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
}

module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");