/*
 * Provenance (gitweb scrape): drivers/mmc/card/mmc_test.c
 * Commit subject: "mmc: mmc_test: Only warn about not waiting for busy
 * if it's supported"
 * blob 0131c6c0d8f6f0e1851cef08a7266e1888444cff
 */
/*
 *  linux/drivers/mmc/card/mmc_test.c
 *
 *  Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
12 #include <linux/mmc/core.h>
13 #include <linux/mmc/card.h>
14 #include <linux/mmc/host.h>
15 #include <linux/mmc/mmc.h>
16 #include <linux/slab.h>
18 #include <linux/scatterlist.h>
19 #include <linux/swap.h> /* For nr_free_buffer_pages() */
20 #include <linux/list.h>
22 #include <linux/debugfs.h>
23 #include <linux/uaccess.h>
24 #include <linux/seq_file.h>
/* Testcase result codes. */
#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

/* Bounce/scratch buffer: 4 pages. */
#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};
/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};
/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};
84 /**
85 * struct mmc_test_transfer_result - transfer results for performance tests.
86 * @link: double-linked list
87 * @count: amount of group of sectors to check
88 * @sectors: amount of sectors to check in one group
89 * @ts: time values of transfer
90 * @rate: calculated transfer rate
92 struct mmc_test_transfer_result {
93 struct list_head link;
94 unsigned int count;
95 unsigned int sectors;
96 struct timespec ts;
97 unsigned int rate;
101 * struct mmc_test_general_result - results for tests.
102 * @link: double-linked list
103 * @card: card under test
104 * @testcase: number of test case
105 * @result: result of test run
106 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
108 struct mmc_test_general_result {
109 struct list_head link;
110 struct mmc_card *card;
111 int testcase;
112 int result;
113 struct list_head tr_lst;
117 * struct mmc_test_dbgfs_file - debugfs related file.
118 * @link: double-linked list
119 * @card: card under test
120 * @file: file created under debugfs
122 struct mmc_test_dbgfs_file {
123 struct list_head link;
124 struct mmc_card *card;
125 struct dentry *file;
129 * struct mmc_test_card - test information.
130 * @card: card under test
131 * @scratch: transfer buffer
132 * @buffer: transfer buffer
133 * @highmem: buffer for highmem tests
134 * @area: information for performance tests
135 * @gr: pointer to results of current testcase
137 struct mmc_test_card {
138 struct mmc_card *card;
140 u8 scratch[BUFFER_SIZE];
141 u8 *buffer;
142 #ifdef CONFIG_HIGHMEM
143 struct page *highmem;
144 #endif
145 struct mmc_test_area area;
146 struct mmc_test_general_result *gr;
149 /*******************************************************************/
150 /* General helper functions */
151 /*******************************************************************/
154 * Configure correct block size in card
156 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
158 return mmc_set_blocklen(test->card, size);
162 * Fill in the mmc_request structure given a set of transfer parameters.
164 static void mmc_test_prepare_mrq(struct mmc_test_card *test,
165 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
166 unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
168 BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
170 if (blocks > 1) {
171 mrq->cmd->opcode = write ?
172 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
173 } else {
174 mrq->cmd->opcode = write ?
175 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
178 mrq->cmd->arg = dev_addr;
179 if (!mmc_card_blockaddr(test->card))
180 mrq->cmd->arg <<= 9;
182 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
184 if (blocks == 1)
185 mrq->stop = NULL;
186 else {
187 mrq->stop->opcode = MMC_STOP_TRANSMISSION;
188 mrq->stop->arg = 0;
189 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
192 mrq->data->blksz = blksz;
193 mrq->data->blocks = blocks;
194 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
195 mrq->data->sg = sg;
196 mrq->data->sg_len = sg_len;
198 mmc_set_data_timeout(mrq->data, test->card);
201 static int mmc_test_busy(struct mmc_command *cmd)
203 return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
204 (R1_CURRENT_STATE(cmd->resp[0]) == 7);
208 * Wait for the card to finish the busy state
210 static int mmc_test_wait_busy(struct mmc_test_card *test)
212 int ret, busy;
213 struct mmc_command cmd;
215 busy = 0;
216 do {
217 memset(&cmd, 0, sizeof(struct mmc_command));
219 cmd.opcode = MMC_SEND_STATUS;
220 cmd.arg = test->card->rca << 16;
221 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
223 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
224 if (ret)
225 break;
227 if (!busy && mmc_test_busy(&cmd)) {
228 busy = 1;
229 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
230 printk(KERN_INFO "%s: Warning: Host did not "
231 "wait for busy state to end.\n",
232 mmc_hostname(test->card->host));
234 } while (mmc_test_busy(&cmd));
236 return ret;
240 * Transfer a single sector of kernel addressable data
242 static int mmc_test_buffer_transfer(struct mmc_test_card *test,
243 u8 *buffer, unsigned addr, unsigned blksz, int write)
245 int ret;
247 struct mmc_request mrq;
248 struct mmc_command cmd;
249 struct mmc_command stop;
250 struct mmc_data data;
252 struct scatterlist sg;
254 memset(&mrq, 0, sizeof(struct mmc_request));
255 memset(&cmd, 0, sizeof(struct mmc_command));
256 memset(&data, 0, sizeof(struct mmc_data));
257 memset(&stop, 0, sizeof(struct mmc_command));
259 mrq.cmd = &cmd;
260 mrq.data = &data;
261 mrq.stop = &stop;
263 sg_init_one(&sg, buffer, blksz);
265 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
267 mmc_wait_for_req(test->card->host, &mrq);
269 if (cmd.error)
270 return cmd.error;
271 if (data.error)
272 return data.error;
274 ret = mmc_test_wait_busy(test);
275 if (ret)
276 return ret;
278 return 0;
281 static void mmc_test_free_mem(struct mmc_test_mem *mem)
283 if (!mem)
284 return;
285 while (mem->cnt--)
286 __free_pages(mem->arr[mem->cnt].page,
287 mem->arr[mem->cnt].order);
288 kfree(mem->arr);
289 kfree(mem);
293 * Allocate a lot of memory, preferrably max_sz but at least min_sz. In case
294 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do
295 * not exceed a maximum number of segments and try not to make segments much
296 * bigger than maximum segment size.
298 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
299 unsigned long max_sz,
300 unsigned int max_segs,
301 unsigned int max_seg_sz)
303 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
304 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
305 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
306 unsigned long page_cnt = 0;
307 unsigned long limit = nr_free_buffer_pages() >> 4;
308 struct mmc_test_mem *mem;
310 if (max_page_cnt > limit)
311 max_page_cnt = limit;
312 if (min_page_cnt > max_page_cnt)
313 min_page_cnt = max_page_cnt;
315 if (max_seg_page_cnt > max_page_cnt)
316 max_seg_page_cnt = max_page_cnt;
318 if (max_segs > max_page_cnt)
319 max_segs = max_page_cnt;
321 mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
322 if (!mem)
323 return NULL;
325 mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
326 GFP_KERNEL);
327 if (!mem->arr)
328 goto out_free;
330 while (max_page_cnt) {
331 struct page *page;
332 unsigned int order;
333 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
334 __GFP_NORETRY;
336 order = get_order(max_seg_page_cnt << PAGE_SHIFT);
337 while (1) {
338 page = alloc_pages(flags, order);
339 if (page || !order)
340 break;
341 order -= 1;
343 if (!page) {
344 if (page_cnt < min_page_cnt)
345 goto out_free;
346 break;
348 mem->arr[mem->cnt].page = page;
349 mem->arr[mem->cnt].order = order;
350 mem->cnt += 1;
351 if (max_page_cnt <= (1UL << order))
352 break;
353 max_page_cnt -= 1UL << order;
354 page_cnt += 1UL << order;
355 if (mem->cnt >= max_segs) {
356 if (page_cnt < min_page_cnt)
357 goto out_free;
358 break;
362 return mem;
364 out_free:
365 mmc_test_free_mem(mem);
366 return NULL;
370 * Map memory into a scatterlist. Optionally allow the same memory to be
371 * mapped more than once.
373 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
374 struct scatterlist *sglist, int repeat,
375 unsigned int max_segs, unsigned int max_seg_sz,
376 unsigned int *sg_len)
378 struct scatterlist *sg = NULL;
379 unsigned int i;
381 sg_init_table(sglist, max_segs);
383 *sg_len = 0;
384 do {
385 for (i = 0; i < mem->cnt; i++) {
386 unsigned long len = PAGE_SIZE << mem->arr[i].order;
388 if (len > sz)
389 len = sz;
390 if (len > max_seg_sz)
391 len = max_seg_sz;
392 if (sg)
393 sg = sg_next(sg);
394 else
395 sg = sglist;
396 if (!sg)
397 return -EINVAL;
398 sg_set_page(sg, mem->arr[i].page, len, 0);
399 sz -= len;
400 *sg_len += 1;
401 if (!sz)
402 break;
404 } while (sz && repeat);
406 if (sz)
407 return -EINVAL;
409 if (sg)
410 sg_mark_end(sg);
412 return 0;
416 * Map memory into a scatterlist so that no pages are contiguous. Allow the
417 * same memory to be mapped more than once.
419 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
420 unsigned long sz,
421 struct scatterlist *sglist,
422 unsigned int max_segs,
423 unsigned int max_seg_sz,
424 unsigned int *sg_len)
426 struct scatterlist *sg = NULL;
427 unsigned int i = mem->cnt, cnt;
428 unsigned long len;
429 void *base, *addr, *last_addr = NULL;
431 sg_init_table(sglist, max_segs);
433 *sg_len = 0;
434 while (sz) {
435 base = page_address(mem->arr[--i].page);
436 cnt = 1 << mem->arr[i].order;
437 while (sz && cnt) {
438 addr = base + PAGE_SIZE * --cnt;
439 if (last_addr && last_addr + PAGE_SIZE == addr)
440 continue;
441 last_addr = addr;
442 len = PAGE_SIZE;
443 if (len > max_seg_sz)
444 len = max_seg_sz;
445 if (len > sz)
446 len = sz;
447 if (sg)
448 sg = sg_next(sg);
449 else
450 sg = sglist;
451 if (!sg)
452 return -EINVAL;
453 sg_set_page(sg, virt_to_page(addr), len, 0);
454 sz -= len;
455 *sg_len += 1;
457 if (i == 0)
458 i = mem->cnt;
461 if (sg)
462 sg_mark_end(sg);
464 return 0;
468 * Calculate transfer rate in bytes per second.
470 static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
472 uint64_t ns;
474 ns = ts->tv_sec;
475 ns *= 1000000000;
476 ns += ts->tv_nsec;
478 bytes *= 1000000000;
480 while (ns > UINT_MAX) {
481 bytes >>= 1;
482 ns >>= 1;
485 if (!ns)
486 return 0;
488 do_div(bytes, (uint32_t)ns);
490 return bytes;
494 * Save transfer results for future usage
496 static void mmc_test_save_transfer_result(struct mmc_test_card *test,
497 unsigned int count, unsigned int sectors, struct timespec ts,
498 unsigned int rate)
500 struct mmc_test_transfer_result *tr;
502 if (!test->gr)
503 return;
505 tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
506 if (!tr)
507 return;
509 tr->count = count;
510 tr->sectors = sectors;
511 tr->ts = ts;
512 tr->rate = rate;
514 list_add_tail(&tr->link, &test->gr->tr_lst);
518 * Print the transfer rate.
520 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
521 struct timespec *ts1, struct timespec *ts2)
523 unsigned int rate, sectors = bytes >> 9;
524 struct timespec ts;
526 ts = timespec_sub(*ts2, *ts1);
528 rate = mmc_test_rate(bytes, &ts);
530 printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
531 "seconds (%u kB/s, %u KiB/s)\n",
532 mmc_hostname(test->card->host), sectors, sectors >> 1,
533 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
534 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
536 mmc_test_save_transfer_result(test, 1, sectors, ts, rate);
540 * Print the average transfer rate.
542 static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
543 unsigned int count, struct timespec *ts1,
544 struct timespec *ts2)
546 unsigned int rate, sectors = bytes >> 9;
547 uint64_t tot = bytes * count;
548 struct timespec ts;
550 ts = timespec_sub(*ts2, *ts1);
552 rate = mmc_test_rate(tot, &ts);
554 printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
555 "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
556 mmc_hostname(test->card->host), count, sectors, count,
557 sectors >> 1, (sectors & 1 ? ".5" : ""),
558 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
559 rate / 1000, rate / 1024);
561 mmc_test_save_transfer_result(test, count, sectors, ts, rate);
565 * Return the card size in sectors.
567 static unsigned int mmc_test_capacity(struct mmc_card *card)
569 if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
570 return card->ext_csd.sectors;
571 else
572 return card->csd.capacity << (card->csd.read_blkbits - 9);
575 /*******************************************************************/
576 /* Test preparation and cleanup */
577 /*******************************************************************/
580 * Fill the first couple of sectors of the card with known data
581 * so that bad reads/writes can be detected
583 static int __mmc_test_prepare(struct mmc_test_card *test, int write)
585 int ret, i;
587 ret = mmc_test_set_blksize(test, 512);
588 if (ret)
589 return ret;
591 if (write)
592 memset(test->buffer, 0xDF, 512);
593 else {
594 for (i = 0;i < 512;i++)
595 test->buffer[i] = i;
598 for (i = 0;i < BUFFER_SIZE / 512;i++) {
599 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
600 if (ret)
601 return ret;
604 return 0;
/* Pre-fill the test sectors ahead of a write-verification testcase. */
static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}
/* Pre-fill the test sectors ahead of a read-verification testcase. */
static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}
617 static int mmc_test_cleanup(struct mmc_test_card *test)
619 int ret, i;
621 ret = mmc_test_set_blksize(test, 512);
622 if (ret)
623 return ret;
625 memset(test->buffer, 0, 512);
627 for (i = 0;i < BUFFER_SIZE / 512;i++) {
628 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
629 if (ret)
630 return ret;
633 return 0;
636 /*******************************************************************/
637 /* Test execution helpers */
638 /*******************************************************************/
641 * Modifies the mmc_request to perform the "short transfer" tests
643 static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
644 struct mmc_request *mrq, int write)
646 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
648 if (mrq->data->blocks > 1) {
649 mrq->cmd->opcode = write ?
650 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
651 mrq->stop = NULL;
652 } else {
653 mrq->cmd->opcode = MMC_SEND_STATUS;
654 mrq->cmd->arg = test->card->rca << 16;
659 * Checks that a normal transfer didn't have any errors
661 static int mmc_test_check_result(struct mmc_test_card *test,
662 struct mmc_request *mrq)
664 int ret;
666 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
668 ret = 0;
670 if (!ret && mrq->cmd->error)
671 ret = mrq->cmd->error;
672 if (!ret && mrq->data->error)
673 ret = mrq->data->error;
674 if (!ret && mrq->stop && mrq->stop->error)
675 ret = mrq->stop->error;
676 if (!ret && mrq->data->bytes_xfered !=
677 mrq->data->blocks * mrq->data->blksz)
678 ret = RESULT_FAIL;
680 if (ret == -EINVAL)
681 ret = RESULT_UNSUP_HOST;
683 return ret;
687 * Checks that a "short transfer" behaved as expected
689 static int mmc_test_check_broken_result(struct mmc_test_card *test,
690 struct mmc_request *mrq)
692 int ret;
694 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
696 ret = 0;
698 if (!ret && mrq->cmd->error)
699 ret = mrq->cmd->error;
700 if (!ret && mrq->data->error == 0)
701 ret = RESULT_FAIL;
702 if (!ret && mrq->data->error != -ETIMEDOUT)
703 ret = mrq->data->error;
704 if (!ret && mrq->stop && mrq->stop->error)
705 ret = mrq->stop->error;
706 if (mrq->data->blocks > 1) {
707 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
708 ret = RESULT_FAIL;
709 } else {
710 if (!ret && mrq->data->bytes_xfered > 0)
711 ret = RESULT_FAIL;
714 if (ret == -EINVAL)
715 ret = RESULT_UNSUP_HOST;
717 return ret;
721 * Tests a basic transfer with certain parameters
723 static int mmc_test_simple_transfer(struct mmc_test_card *test,
724 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
725 unsigned blocks, unsigned blksz, int write)
727 struct mmc_request mrq;
728 struct mmc_command cmd;
729 struct mmc_command stop;
730 struct mmc_data data;
732 memset(&mrq, 0, sizeof(struct mmc_request));
733 memset(&cmd, 0, sizeof(struct mmc_command));
734 memset(&data, 0, sizeof(struct mmc_data));
735 memset(&stop, 0, sizeof(struct mmc_command));
737 mrq.cmd = &cmd;
738 mrq.data = &data;
739 mrq.stop = &stop;
741 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
742 blocks, blksz, write);
744 mmc_wait_for_req(test->card->host, &mrq);
746 mmc_test_wait_busy(test);
748 return mmc_test_check_result(test, &mrq);
752 * Tests a transfer where the card will fail completely or partly
754 static int mmc_test_broken_transfer(struct mmc_test_card *test,
755 unsigned blocks, unsigned blksz, int write)
757 struct mmc_request mrq;
758 struct mmc_command cmd;
759 struct mmc_command stop;
760 struct mmc_data data;
762 struct scatterlist sg;
764 memset(&mrq, 0, sizeof(struct mmc_request));
765 memset(&cmd, 0, sizeof(struct mmc_command));
766 memset(&data, 0, sizeof(struct mmc_data));
767 memset(&stop, 0, sizeof(struct mmc_command));
769 mrq.cmd = &cmd;
770 mrq.data = &data;
771 mrq.stop = &stop;
773 sg_init_one(&sg, test->buffer, blocks * blksz);
775 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
776 mmc_test_prepare_broken_mrq(test, &mrq, write);
778 mmc_wait_for_req(test->card->host, &mrq);
780 mmc_test_wait_busy(test);
782 return mmc_test_check_broken_result(test, &mrq);
786 * Does a complete transfer test where data is also validated
788 * Note: mmc_test_prepare() must have been done before this call
790 static int mmc_test_transfer(struct mmc_test_card *test,
791 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
792 unsigned blocks, unsigned blksz, int write)
794 int ret, i;
795 unsigned long flags;
797 if (write) {
798 for (i = 0;i < blocks * blksz;i++)
799 test->scratch[i] = i;
800 } else {
801 memset(test->scratch, 0, BUFFER_SIZE);
803 local_irq_save(flags);
804 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
805 local_irq_restore(flags);
807 ret = mmc_test_set_blksize(test, blksz);
808 if (ret)
809 return ret;
811 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
812 blocks, blksz, write);
813 if (ret)
814 return ret;
816 if (write) {
817 int sectors;
819 ret = mmc_test_set_blksize(test, 512);
820 if (ret)
821 return ret;
823 sectors = (blocks * blksz + 511) / 512;
824 if ((sectors * 512) == (blocks * blksz))
825 sectors++;
827 if ((sectors * 512) > BUFFER_SIZE)
828 return -EINVAL;
830 memset(test->buffer, 0, sectors * 512);
832 for (i = 0;i < sectors;i++) {
833 ret = mmc_test_buffer_transfer(test,
834 test->buffer + i * 512,
835 dev_addr + i, 512, 0);
836 if (ret)
837 return ret;
840 for (i = 0;i < blocks * blksz;i++) {
841 if (test->buffer[i] != (u8)i)
842 return RESULT_FAIL;
845 for (;i < sectors * 512;i++) {
846 if (test->buffer[i] != 0xDF)
847 return RESULT_FAIL;
849 } else {
850 local_irq_save(flags);
851 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
852 local_irq_restore(flags);
853 for (i = 0;i < blocks * blksz;i++) {
854 if (test->scratch[i] != (u8)i)
855 return RESULT_FAIL;
859 return 0;
862 /*******************************************************************/
863 /* Tests */
864 /*******************************************************************/
/* One entry in the testcase table: optional prepare/cleanup around run. */
struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};
874 static int mmc_test_basic_write(struct mmc_test_card *test)
876 int ret;
877 struct scatterlist sg;
879 ret = mmc_test_set_blksize(test, 512);
880 if (ret)
881 return ret;
883 sg_init_one(&sg, test->buffer, 512);
885 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
886 if (ret)
887 return ret;
889 return 0;
892 static int mmc_test_basic_read(struct mmc_test_card *test)
894 int ret;
895 struct scatterlist sg;
897 ret = mmc_test_set_blksize(test, 512);
898 if (ret)
899 return ret;
901 sg_init_one(&sg, test->buffer, 512);
903 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
904 if (ret)
905 return ret;
907 return 0;
910 static int mmc_test_verify_write(struct mmc_test_card *test)
912 int ret;
913 struct scatterlist sg;
915 sg_init_one(&sg, test->buffer, 512);
917 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
918 if (ret)
919 return ret;
921 return 0;
924 static int mmc_test_verify_read(struct mmc_test_card *test)
926 int ret;
927 struct scatterlist sg;
929 sg_init_one(&sg, test->buffer, 512);
931 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
932 if (ret)
933 return ret;
935 return 0;
938 static int mmc_test_multi_write(struct mmc_test_card *test)
940 int ret;
941 unsigned int size;
942 struct scatterlist sg;
944 if (test->card->host->max_blk_count == 1)
945 return RESULT_UNSUP_HOST;
947 size = PAGE_SIZE * 2;
948 size = min(size, test->card->host->max_req_size);
949 size = min(size, test->card->host->max_seg_size);
950 size = min(size, test->card->host->max_blk_count * 512);
952 if (size < 1024)
953 return RESULT_UNSUP_HOST;
955 sg_init_one(&sg, test->buffer, size);
957 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
958 if (ret)
959 return ret;
961 return 0;
964 static int mmc_test_multi_read(struct mmc_test_card *test)
966 int ret;
967 unsigned int size;
968 struct scatterlist sg;
970 if (test->card->host->max_blk_count == 1)
971 return RESULT_UNSUP_HOST;
973 size = PAGE_SIZE * 2;
974 size = min(size, test->card->host->max_req_size);
975 size = min(size, test->card->host->max_seg_size);
976 size = min(size, test->card->host->max_blk_count * 512);
978 if (size < 1024)
979 return RESULT_UNSUP_HOST;
981 sg_init_one(&sg, test->buffer, size);
983 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
984 if (ret)
985 return ret;
987 return 0;
990 static int mmc_test_pow2_write(struct mmc_test_card *test)
992 int ret, i;
993 struct scatterlist sg;
995 if (!test->card->csd.write_partial)
996 return RESULT_UNSUP_CARD;
998 for (i = 1; i < 512;i <<= 1) {
999 sg_init_one(&sg, test->buffer, i);
1000 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1001 if (ret)
1002 return ret;
1005 return 0;
1008 static int mmc_test_pow2_read(struct mmc_test_card *test)
1010 int ret, i;
1011 struct scatterlist sg;
1013 if (!test->card->csd.read_partial)
1014 return RESULT_UNSUP_CARD;
1016 for (i = 1; i < 512;i <<= 1) {
1017 sg_init_one(&sg, test->buffer, i);
1018 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1019 if (ret)
1020 return ret;
1023 return 0;
1026 static int mmc_test_weird_write(struct mmc_test_card *test)
1028 int ret, i;
1029 struct scatterlist sg;
1031 if (!test->card->csd.write_partial)
1032 return RESULT_UNSUP_CARD;
1034 for (i = 3; i < 512;i += 7) {
1035 sg_init_one(&sg, test->buffer, i);
1036 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1037 if (ret)
1038 return ret;
1041 return 0;
1044 static int mmc_test_weird_read(struct mmc_test_card *test)
1046 int ret, i;
1047 struct scatterlist sg;
1049 if (!test->card->csd.read_partial)
1050 return RESULT_UNSUP_CARD;
1052 for (i = 3; i < 512;i += 7) {
1053 sg_init_one(&sg, test->buffer, i);
1054 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1055 if (ret)
1056 return ret;
1059 return 0;
1062 static int mmc_test_align_write(struct mmc_test_card *test)
1064 int ret, i;
1065 struct scatterlist sg;
1067 for (i = 1;i < 4;i++) {
1068 sg_init_one(&sg, test->buffer + i, 512);
1069 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1070 if (ret)
1071 return ret;
1074 return 0;
1077 static int mmc_test_align_read(struct mmc_test_card *test)
1079 int ret, i;
1080 struct scatterlist sg;
1082 for (i = 1;i < 4;i++) {
1083 sg_init_one(&sg, test->buffer + i, 512);
1084 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1085 if (ret)
1086 return ret;
1089 return 0;
1092 static int mmc_test_align_multi_write(struct mmc_test_card *test)
1094 int ret, i;
1095 unsigned int size;
1096 struct scatterlist sg;
1098 if (test->card->host->max_blk_count == 1)
1099 return RESULT_UNSUP_HOST;
1101 size = PAGE_SIZE * 2;
1102 size = min(size, test->card->host->max_req_size);
1103 size = min(size, test->card->host->max_seg_size);
1104 size = min(size, test->card->host->max_blk_count * 512);
1106 if (size < 1024)
1107 return RESULT_UNSUP_HOST;
1109 for (i = 1;i < 4;i++) {
1110 sg_init_one(&sg, test->buffer + i, size);
1111 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1112 if (ret)
1113 return ret;
1116 return 0;
1119 static int mmc_test_align_multi_read(struct mmc_test_card *test)
1121 int ret, i;
1122 unsigned int size;
1123 struct scatterlist sg;
1125 if (test->card->host->max_blk_count == 1)
1126 return RESULT_UNSUP_HOST;
1128 size = PAGE_SIZE * 2;
1129 size = min(size, test->card->host->max_req_size);
1130 size = min(size, test->card->host->max_seg_size);
1131 size = min(size, test->card->host->max_blk_count * 512);
1133 if (size < 1024)
1134 return RESULT_UNSUP_HOST;
1136 for (i = 1;i < 4;i++) {
1137 sg_init_one(&sg, test->buffer + i, size);
1138 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1139 if (ret)
1140 return ret;
1143 return 0;
/* Deliberately broken single-block write; expects a short transfer. */
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_set_blksize(test, 512);
	if (err)
		return err;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}
/* Deliberately broken single-block read; expects a short transfer. */
static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_set_blksize(test, 512);
	if (err)
		return err;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}
1176 static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1178 int ret;
1180 if (test->card->host->max_blk_count == 1)
1181 return RESULT_UNSUP_HOST;
1183 ret = mmc_test_set_blksize(test, 512);
1184 if (ret)
1185 return ret;
1187 ret = mmc_test_broken_transfer(test, 2, 512, 1);
1188 if (ret)
1189 return ret;
1191 return 0;
1194 static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1196 int ret;
1198 if (test->card->host->max_blk_count == 1)
1199 return RESULT_UNSUP_HOST;
1201 ret = mmc_test_set_blksize(test, 512);
1202 if (ret)
1203 return ret;
1205 ret = mmc_test_broken_transfer(test, 2, 512, 0);
1206 if (ret)
1207 return ret;
1209 return 0;
1212 #ifdef CONFIG_HIGHMEM
1214 static int mmc_test_write_high(struct mmc_test_card *test)
1216 int ret;
1217 struct scatterlist sg;
1219 sg_init_table(&sg, 1);
1220 sg_set_page(&sg, test->highmem, 512, 0);
1222 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1223 if (ret)
1224 return ret;
1226 return 0;
1229 static int mmc_test_read_high(struct mmc_test_card *test)
1231 int ret;
1232 struct scatterlist sg;
1234 sg_init_table(&sg, 1);
1235 sg_set_page(&sg, test->highmem, 512, 0);
1237 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1238 if (ret)
1239 return ret;
1241 return 0;
1244 static int mmc_test_multi_write_high(struct mmc_test_card *test)
1246 int ret;
1247 unsigned int size;
1248 struct scatterlist sg;
1250 if (test->card->host->max_blk_count == 1)
1251 return RESULT_UNSUP_HOST;
1253 size = PAGE_SIZE * 2;
1254 size = min(size, test->card->host->max_req_size);
1255 size = min(size, test->card->host->max_seg_size);
1256 size = min(size, test->card->host->max_blk_count * 512);
1258 if (size < 1024)
1259 return RESULT_UNSUP_HOST;
1261 sg_init_table(&sg, 1);
1262 sg_set_page(&sg, test->highmem, size, 0);
1264 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1265 if (ret)
1266 return ret;
1268 return 0;
1271 static int mmc_test_multi_read_high(struct mmc_test_card *test)
1273 int ret;
1274 unsigned int size;
1275 struct scatterlist sg;
1277 if (test->card->host->max_blk_count == 1)
1278 return RESULT_UNSUP_HOST;
1280 size = PAGE_SIZE * 2;
1281 size = min(size, test->card->host->max_req_size);
1282 size = min(size, test->card->host->max_seg_size);
1283 size = min(size, test->card->host->max_blk_count * 512);
1285 if (size < 1024)
1286 return RESULT_UNSUP_HOST;
1288 sg_init_table(&sg, 1);
1289 sg_set_page(&sg, test->highmem, size, 0);
1291 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1292 if (ret)
1293 return ret;
1295 return 0;
1298 #else
1300 static int mmc_test_no_highmem(struct mmc_test_card *test)
1302 printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
1303 mmc_hostname(test->card->host));
1304 return 0;
1307 #endif /* CONFIG_HIGHMEM */
1310 * Map sz bytes so that it can be transferred.
1312 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1313 int max_scatter)
1315 struct mmc_test_area *t = &test->area;
1316 int err;
1318 t->blocks = sz >> 9;
1320 if (max_scatter) {
1321 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1322 t->max_segs, t->max_seg_sz,
1323 &t->sg_len);
1324 } else {
1325 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1326 t->max_seg_sz, &t->sg_len);
1328 if (err)
1329 printk(KERN_INFO "%s: Failed to map sg list\n",
1330 mmc_hostname(test->card->host));
1331 return err;
1335 * Transfer bytes mapped by mmc_test_area_map().
1337 static int mmc_test_area_transfer(struct mmc_test_card *test,
1338 unsigned int dev_addr, int write)
1340 struct mmc_test_area *t = &test->area;
1342 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1343 t->blocks, 512, write);
1347 * Map and transfer bytes.
1349 static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1350 unsigned int dev_addr, int write, int max_scatter,
1351 int timed)
1353 struct timespec ts1, ts2;
1354 int ret;
1357 * In the case of a maximally scattered transfer, the maximum transfer
1358 * size is further limited by using PAGE_SIZE segments.
1360 if (max_scatter) {
1361 struct mmc_test_area *t = &test->area;
1362 unsigned long max_tfr;
1364 if (t->max_seg_sz >= PAGE_SIZE)
1365 max_tfr = t->max_segs * PAGE_SIZE;
1366 else
1367 max_tfr = t->max_segs * t->max_seg_sz;
1368 if (sz > max_tfr)
1369 sz = max_tfr;
1372 ret = mmc_test_area_map(test, sz, max_scatter);
1373 if (ret)
1374 return ret;
1376 if (timed)
1377 getnstimeofday(&ts1);
1379 ret = mmc_test_area_transfer(test, dev_addr, write);
1380 if (ret)
1381 return ret;
1383 if (timed)
1384 getnstimeofday(&ts2);
1386 if (timed)
1387 mmc_test_print_rate(test, sz, &ts1, &ts2);
1389 return 0;
1393 * Write the test area entirely.
1395 static int mmc_test_area_fill(struct mmc_test_card *test)
1397 return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
1398 1, 0, 0);
1402 * Erase the test area entirely.
1404 static int mmc_test_area_erase(struct mmc_test_card *test)
1406 struct mmc_test_area *t = &test->area;
1408 if (!mmc_can_erase(test->card))
1409 return 0;
1411 return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
1412 MMC_ERASE_ARG);
/*
 * Cleanup struct mmc_test_area: release the scatterlist array and the page
 * allocations made by mmc_test_area_init().  Always returns 0 so it can be
 * used directly as a test-case cleanup callback.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	mmc_test_free_mem(t->mem);

	return 0;
}
1429 * Initialize an area for testing large transfers. The size of the area is the
1430 * preferred erase size which is a good size for optimal transfer speed. Note
1431 * that is typically 4MiB for modern cards. The test area is set to the middle
1432 * of the card because cards may have different charateristics at the front
1433 * (for FAT file system optimization). Optionally, the area is erased (if the
1434 * card supports it) which may improve write performance. Optionally, the area
1435 * is filled with data for subsequent read tests.
1437 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1439 struct mmc_test_area *t = &test->area;
1440 unsigned long min_sz = 64 * 1024;
1441 int ret;
1443 ret = mmc_test_set_blksize(test, 512);
1444 if (ret)
1445 return ret;
1447 if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
1448 t->max_sz = TEST_AREA_MAX_SIZE;
1449 else
1450 t->max_sz = (unsigned long)test->card->pref_erase << 9;
1452 t->max_segs = test->card->host->max_segs;
1453 t->max_seg_sz = test->card->host->max_seg_size;
1455 t->max_tfr = t->max_sz;
1456 if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1457 t->max_tfr = test->card->host->max_blk_count << 9;
1458 if (t->max_tfr > test->card->host->max_req_size)
1459 t->max_tfr = test->card->host->max_req_size;
1460 if (t->max_tfr / t->max_seg_sz > t->max_segs)
1461 t->max_tfr = t->max_segs * t->max_seg_sz;
1464 * Try to allocate enough memory for a max. sized transfer. Less is OK
1465 * because the same memory can be mapped into the scatterlist more than
1466 * once. Also, take into account the limits imposed on scatterlist
1467 * segments by the host driver.
1469 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
1470 t->max_seg_sz);
1471 if (!t->mem)
1472 return -ENOMEM;
1474 t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
1475 if (!t->sg) {
1476 ret = -ENOMEM;
1477 goto out_free;
1480 t->dev_addr = mmc_test_capacity(test->card) / 2;
1481 t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1483 if (erase) {
1484 ret = mmc_test_area_erase(test);
1485 if (ret)
1486 goto out_free;
1489 if (fill) {
1490 ret = mmc_test_area_fill(test);
1491 if (ret)
1492 goto out_free;
1495 return 0;
1497 out_free:
1498 mmc_test_area_cleanup(test);
1499 return ret;
/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	/* erase=0, fill=0 */
	return mmc_test_area_init(test, 0, 0);
}
/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	/* erase=1, fill=0 */
	return mmc_test_area_init(test, 1, 0);
}
/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	/* erase=1, fill=1 */
	return mmc_test_area_init(test, 1, 1);
}
1527 * Test best-case performance. Best-case performance is expected from
1528 * a single large transfer.
1530 * An additional option (max_scatter) allows the measurement of the same
1531 * transfer but with no contiguous pages in the scatter list. This tests
1532 * the efficiency of DMA to handle scattered pages.
1534 static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1535 int max_scatter)
1537 return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
1538 write, max_scatter, 1);
/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	/* write=0, max_scatter=0: one large contiguous read */
	return mmc_test_best_performance(test, 0, 0);
}
/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	/* write=1, max_scatter=0: one large contiguous write */
	return mmc_test_best_performance(test, 1, 0);
}
/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	/* write=0, max_scatter=1: read with a maximally scattered sg list */
	return mmc_test_best_performance(test, 0, 1);
}
/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	/* write=1, max_scatter=1: write with a maximally scattered sg list */
	return mmc_test_best_performance(test, 1, 1);
}
1574 * Single read performance by transfer size.
1576 static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1578 unsigned long sz;
1579 unsigned int dev_addr;
1580 int ret;
1582 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1583 dev_addr = test->area.dev_addr + (sz >> 9);
1584 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1585 if (ret)
1586 return ret;
1588 sz = test->area.max_tfr;
1589 dev_addr = test->area.dev_addr;
1590 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1594 * Single write performance by transfer size.
1596 static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1598 unsigned long sz;
1599 unsigned int dev_addr;
1600 int ret;
1602 ret = mmc_test_area_erase(test);
1603 if (ret)
1604 return ret;
1605 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1606 dev_addr = test->area.dev_addr + (sz >> 9);
1607 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1608 if (ret)
1609 return ret;
1611 ret = mmc_test_area_erase(test);
1612 if (ret)
1613 return ret;
1614 sz = test->area.max_tfr;
1615 dev_addr = test->area.dev_addr;
1616 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1620 * Single trim performance by transfer size.
1622 static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1624 unsigned long sz;
1625 unsigned int dev_addr;
1626 struct timespec ts1, ts2;
1627 int ret;
1629 if (!mmc_can_trim(test->card))
1630 return RESULT_UNSUP_CARD;
1632 if (!mmc_can_erase(test->card))
1633 return RESULT_UNSUP_HOST;
1635 for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
1636 dev_addr = test->area.dev_addr + (sz >> 9);
1637 getnstimeofday(&ts1);
1638 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1639 if (ret)
1640 return ret;
1641 getnstimeofday(&ts2);
1642 mmc_test_print_rate(test, sz, &ts1, &ts2);
1644 dev_addr = test->area.dev_addr;
1645 getnstimeofday(&ts1);
1646 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1647 if (ret)
1648 return ret;
1649 getnstimeofday(&ts2);
1650 mmc_test_print_rate(test, sz, &ts1, &ts2);
1651 return 0;
1654 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1656 unsigned int dev_addr, i, cnt;
1657 struct timespec ts1, ts2;
1658 int ret;
1660 cnt = test->area.max_sz / sz;
1661 dev_addr = test->area.dev_addr;
1662 getnstimeofday(&ts1);
1663 for (i = 0; i < cnt; i++) {
1664 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1665 if (ret)
1666 return ret;
1667 dev_addr += (sz >> 9);
1669 getnstimeofday(&ts2);
1670 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1671 return 0;
1675 * Consecutive read performance by transfer size.
1677 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1679 unsigned long sz;
1680 int ret;
1682 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1683 ret = mmc_test_seq_read_perf(test, sz);
1684 if (ret)
1685 return ret;
1687 sz = test->area.max_tfr;
1688 return mmc_test_seq_read_perf(test, sz);
1691 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1693 unsigned int dev_addr, i, cnt;
1694 struct timespec ts1, ts2;
1695 int ret;
1697 ret = mmc_test_area_erase(test);
1698 if (ret)
1699 return ret;
1700 cnt = test->area.max_sz / sz;
1701 dev_addr = test->area.dev_addr;
1702 getnstimeofday(&ts1);
1703 for (i = 0; i < cnt; i++) {
1704 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1705 if (ret)
1706 return ret;
1707 dev_addr += (sz >> 9);
1709 getnstimeofday(&ts2);
1710 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1711 return 0;
1715 * Consecutive write performance by transfer size.
1717 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1719 unsigned long sz;
1720 int ret;
1722 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1723 ret = mmc_test_seq_write_perf(test, sz);
1724 if (ret)
1725 return ret;
1727 sz = test->area.max_tfr;
1728 return mmc_test_seq_write_perf(test, sz);
1732 * Consecutive trim performance by transfer size.
1734 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1736 unsigned long sz;
1737 unsigned int dev_addr, i, cnt;
1738 struct timespec ts1, ts2;
1739 int ret;
1741 if (!mmc_can_trim(test->card))
1742 return RESULT_UNSUP_CARD;
1744 if (!mmc_can_erase(test->card))
1745 return RESULT_UNSUP_HOST;
1747 for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
1748 ret = mmc_test_area_erase(test);
1749 if (ret)
1750 return ret;
1751 ret = mmc_test_area_fill(test);
1752 if (ret)
1753 return ret;
1754 cnt = test->area.max_sz / sz;
1755 dev_addr = test->area.dev_addr;
1756 getnstimeofday(&ts1);
1757 for (i = 0; i < cnt; i++) {
1758 ret = mmc_erase(test->card, dev_addr, sz >> 9,
1759 MMC_TRIM_ARG);
1760 if (ret)
1761 return ret;
1762 dev_addr += (sz >> 9);
1764 getnstimeofday(&ts2);
1765 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1767 return 0;
/*
 * Table of test cases executed by mmc_test_run().  The number written to the
 * debugfs "test" file is the 1-based index into this array (0 = run all).
 */
static const struct mmc_test_case mmc_test_cases[] = {
	/* Basic single- and multi-block transfers */
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	/* Unusual block sizes and buffer alignments */
	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	/* Error-path behaviour */
	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

	/* Highmem buffers, or stubs reporting "skipped" without CONFIG_HIGHMEM */
#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	/* Performance measurements over the dedicated test area */
	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

};
/* Serializes test runs and guards the global result/file lists below. */
static DEFINE_MUTEX(mmc_test_lock);

/* Results of the last run, one mmc_test_general_result per executed case. */
static LIST_HEAD(mmc_test_result);
/*
 * Run one test case (1-based index), or all of them when testcase == 0,
 * against the given card.  The host is claimed for the whole run.  Results
 * are logged and, when allocation succeeds, recorded on mmc_test_result.
 */
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	printk(KERN_INFO "%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
		struct mmc_test_general_result *gr;

		/* testcase == 0 means "run every case". */
		if (testcase && ((i + 1) != testcase))
			continue;

		printk(KERN_INFO "%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		/* A failed prepare skips the run and cleanup stages. */
		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				printk(KERN_INFO "%s: Result: Prepare "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		/*
		 * Result recording is best-effort: if this allocation fails,
		 * the test still runs but leaves no entry for mtf_test_show().
		 */
		gr = kzalloc(sizeof(struct mmc_test_general_result),
			GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Assign data what we know already */
			gr->card = test->card;
			gr->testcase = i;

			/* Append container to global one */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save the pointer to created container in our private
			 * structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			printk(KERN_INFO "%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			printk(KERN_INFO "%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			printk(KERN_INFO "%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		/* Cleanup failure is only a warning; the result stands. */
		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				printk(KERN_INFO "%s: Warning: Cleanup "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	printk(KERN_INFO "%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}
2110 static void mmc_test_free_result(struct mmc_card *card)
2112 struct mmc_test_general_result *gr, *grs;
2114 mutex_lock(&mmc_test_lock);
2116 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2117 struct mmc_test_transfer_result *tr, *trs;
2119 if (card && gr->card != card)
2120 continue;
2122 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2123 list_del(&tr->link);
2124 kfree(tr);
2127 list_del(&gr->link);
2128 kfree(gr);
2131 mutex_unlock(&mmc_test_lock);
/* Debugfs "test" files created per card; walked under mmc_test_lock. */
static LIST_HEAD(mmc_test_file_test);
2136 static int mtf_test_show(struct seq_file *sf, void *data)
2138 struct mmc_card *card = (struct mmc_card *)sf->private;
2139 struct mmc_test_general_result *gr;
2141 mutex_lock(&mmc_test_lock);
2143 list_for_each_entry(gr, &mmc_test_result, link) {
2144 struct mmc_test_transfer_result *tr;
2146 if (gr->card != card)
2147 continue;
2149 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
2151 list_for_each_entry(tr, &gr->tr_lst, link) {
2152 seq_printf(sf, "%u %d %lu.%09lu %u\n",
2153 tr->count, tr->sectors,
2154 (unsigned long)tr->ts.tv_sec,
2155 (unsigned long)tr->ts.tv_nsec,
2156 tr->rate);
2160 mutex_unlock(&mmc_test_lock);
2162 return 0;
/* Open handler: bind the seq_file to the card stored in i_private. */
static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}
2170 static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2171 size_t count, loff_t *pos)
2173 struct seq_file *sf = (struct seq_file *)file->private_data;
2174 struct mmc_card *card = (struct mmc_card *)sf->private;
2175 struct mmc_test_card *test;
2176 char lbuf[12];
2177 long testcase;
2179 if (count >= sizeof(lbuf))
2180 return -EINVAL;
2182 if (copy_from_user(lbuf, buf, count))
2183 return -EFAULT;
2184 lbuf[count] = '\0';
2186 if (strict_strtol(lbuf, 10, &testcase))
2187 return -EINVAL;
2189 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2190 if (!test)
2191 return -ENOMEM;
2194 * Remove all test cases associated with given card. Thus we have only
2195 * actual data of the last run.
2197 mmc_test_free_result(card);
2199 test->card = card;
2201 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
2202 #ifdef CONFIG_HIGHMEM
2203 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2204 #endif
2206 #ifdef CONFIG_HIGHMEM
2207 if (test->buffer && test->highmem) {
2208 #else
2209 if (test->buffer) {
2210 #endif
2211 mutex_lock(&mmc_test_lock);
2212 mmc_test_run(test, testcase);
2213 mutex_unlock(&mmc_test_lock);
2216 #ifdef CONFIG_HIGHMEM
2217 __free_pages(test->highmem, BUFFER_ORDER);
2218 #endif
2219 kfree(test->buffer);
2220 kfree(test);
2222 return count;
/* Debugfs "test" file: read shows stored results, write runs a test number. */
static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2233 static void mmc_test_free_file_test(struct mmc_card *card)
2235 struct mmc_test_dbgfs_file *df, *dfs;
2237 mutex_lock(&mmc_test_lock);
2239 list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
2240 if (card && df->card != card)
2241 continue;
2242 debugfs_remove(df->file);
2243 list_del(&df->link);
2244 kfree(df);
2247 mutex_unlock(&mmc_test_lock);
2250 static int mmc_test_register_file_test(struct mmc_card *card)
2252 struct dentry *file = NULL;
2253 struct mmc_test_dbgfs_file *df;
2254 int ret = 0;
2256 mutex_lock(&mmc_test_lock);
2258 if (card->debugfs_root)
2259 file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
2260 card->debugfs_root, card, &mmc_test_fops_test);
2262 if (IS_ERR_OR_NULL(file)) {
2263 dev_err(&card->dev,
2264 "Can't create file. Perhaps debugfs is disabled.\n");
2265 ret = -ENODEV;
2266 goto err;
2269 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
2270 if (!df) {
2271 debugfs_remove(file);
2272 dev_err(&card->dev,
2273 "Can't allocate memory for internal usage.\n");
2274 ret = -ENOMEM;
2275 goto err;
2278 df->card = card;
2279 df->file = file;
2281 list_add(&df->link, &mmc_test_file_test);
2283 err:
2284 mutex_unlock(&mmc_test_lock);
2286 return ret;
2289 static int mmc_test_probe(struct mmc_card *card)
2291 int ret;
2293 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
2294 return -ENODEV;
2296 ret = mmc_test_register_file_test(card);
2297 if (ret)
2298 return ret;
2300 dev_info(&card->dev, "Card claimed for testing.\n");
2302 return 0;
/* Driver remove: drop this card's stored results and its debugfs file. */
static void mmc_test_remove(struct mmc_card *card)
{
	mmc_test_free_result(card);
	mmc_test_free_file_test(card);
}
/* MMC bus driver glue: probe/remove hooks for every detected card. */
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
};
/* Module init: register with the MMC bus. */
static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}
/* Module exit: free all global state, then unregister from the MMC bus. */
static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_file_test(NULL);

	mmc_unregister_driver(&mmc_driver);
}
/* Module entry points and metadata. */
module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");