Linux Kernel 3.7.1
mmc_test.c
/*
 * linux/drivers/mmc/card/mmc_test.c
 *
 * Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)
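/*
 * With BUFFER_ORDER 2 the test buffer spans four pages: on a system with
 * 4 KiB pages that is 16 KiB, i.e. 32 sectors of 512 bytes.
 */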

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)

/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};

/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: amount of group of sectors to check
 * @sectors: amount of sectors to check in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec ts;
	unsigned int rate;
	unsigned int iops;
};

/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};

/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};

/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};

enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_ERASE = 1 << 0,
};

struct mmc_test_multiple_rw {
	unsigned int *sg_len;
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};

struct mmc_test_async_req {
	struct mmc_async_req areq;
	struct mmc_test_card *test;
};

/*******************************************************************/
/* General helper functions */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}

/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
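	/*
	 * Standard-capacity cards are byte addressed, so the sector address
	 * must be scaled to a byte offset; block-addressed (high capacity)
	 * cards take the sector number directly.
	 */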
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, test->card);
}

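/*
 * A card is still busy if the R1 READY_FOR_DATA bit is clear or if the
 * card reports the programming state after a write.
 */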
static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}

/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd = {0};

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				pr_info("%s: Warning: Host did not "
					"wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}

/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	int ret;

	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	ret = mmc_test_wait_busy(test);
	if (ret)
		return ret;

	return 0;
}

static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}

/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
			   GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NORETRY |
				__GFP_NOWARN;

		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}

/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len, int min_sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;
	unsigned long sz = size;

	sg_init_table(sglist, max_segs);
	if (min_sg_len > max_segs)
		min_sg_len = max_segs;

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (min_sg_len && (size / min_sg_len < len))
				len = ALIGN(size / min_sg_len, 512);
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
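			/*
			 * Pages are mapped in descending address order, so
			 * consecutive sg entries are never mergeable; skip
			 * any page that would directly follow the one just
			 * mapped.
			 */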
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Calculate transfer rate in bytes per second.
 */
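/*
 * rate = bytes * 10^9 / ns.  For example, 524288 bytes moved in 0.25 s gives
 * (524288 * 10^9) / 250000000 = 2097152 bytes per second.  The halving loop
 * keeps the divisor within 32 bits so do_div() can be used.
 */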
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns;

	ns = ts->tv_sec;
	ns *= 1000000000;
	ns += ts->tv_nsec;

	bytes *= 1000000000;

	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}

/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	if (!test->gr)
		return;

	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}

/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
		"seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
		mmc_hostname(test->card->host), sectors, sectors >> 1,
		(sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
		(unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
		iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}

/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec *ts1,
				    struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
		"%lu.%09lu seconds (%u kB/s, %u KiB/s, "
		"%u.%02u IOPS, sg_len %d)\n",
		mmc_hostname(test->card->host), count, sectors, count,
		sectors >> 1, (sectors & 1 ? ".5" : ""),
		(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
		rate / 1000, rate / 1024, iops / 100, iops % 100,
		test->area.sg_len);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}

/*
 * Return the card size in sectors.
 */
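/*
 * Block-addressed (high capacity) MMC exposes its size directly in EXT_CSD
 * sectors; for SD and byte-addressed MMC the size is derived from the CSD
 * fields, scaled to 512-byte sectors.
 */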
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}

/*******************************************************************/
/* Test preparation and cleanup */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/*******************************************************************/
/* Test execution helpers */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
				 struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
			mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

static int mmc_test_check_result_async(struct mmc_card *card,
				       struct mmc_async_req *areq)
{
	struct mmc_test_async_req *test_async =
		container_of(areq, struct mmc_test_async_req, areq);

	mmc_test_wait_busy(test_async->test);

	return mmc_test_check_result(test_async->test, areq->mrq);
}

/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
					struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Tests nonblock transfer with certain parameters
 */
static void mmc_test_nonblock_reset(struct mmc_request *mrq,
				    struct mmc_command *cmd,
				    struct mmc_command *stop,
				    struct mmc_data *data)
{
	memset(mrq, 0, sizeof(struct mmc_request));
	memset(cmd, 0, sizeof(struct mmc_command));
	memset(data, 0, sizeof(struct mmc_data));
	memset(stop, 0, sizeof(struct mmc_command));

	mrq->cmd = cmd;
	mrq->data = data;
	mrq->stop = stop;
}
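/*
 * Double-buffering: two requests alternate so that while the host is
 * processing one, the next is already prepared; mrq1 and mrq2 swap roles
 * each iteration until 'count' transfers have been issued.
 */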
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
				      struct scatterlist *sg, unsigned sg_len,
				      unsigned dev_addr, unsigned blocks,
				      unsigned blksz, int write, int count)
{
	struct mmc_request mrq1;
	struct mmc_command cmd1;
	struct mmc_command stop1;
	struct mmc_data data1;

	struct mmc_request mrq2;
	struct mmc_command cmd2;
	struct mmc_command stop2;
	struct mmc_data data2;

	struct mmc_test_async_req test_areq[2];
	struct mmc_async_req *done_areq;
	struct mmc_async_req *cur_areq = &test_areq[0].areq;
	struct mmc_async_req *other_areq = &test_areq[1].areq;
	int i;
	int ret;

	test_areq[0].test = test;
	test_areq[1].test = test;

	mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
	mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);

	cur_areq->mrq = &mrq1;
	cur_areq->err_check = mmc_test_check_result_async;
	other_areq->mrq = &mrq2;
	other_areq->err_check = mmc_test_check_result_async;

	for (i = 0; i < count; i++) {
		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
				     blocks, blksz, write);
		done_areq = mmc_start_req(test->card->host, cur_areq, &ret);

		if (ret || (!done_areq && i > 0))
			goto err;

		if (done_areq) {
			if (done_areq->mrq == &mrq2)
				mmc_test_nonblock_reset(&mrq2, &cmd2,
							&stop2, &data2);
			else
				mmc_test_nonblock_reset(&mrq1, &cmd1,
							&stop1, &data1);
		}
		done_areq = cur_areq;
		cur_areq = other_areq;
		other_areq = done_areq;
		dev_addr += blocks;
	}

	done_areq = mmc_start_req(test->card->host, NULL, &ret);

	return ret;
err:
	return ret;
}

/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
		blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}

/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}

/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

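		/*
		 * Bytes past the requested transfer in the last sector must
		 * still hold the 0xDF pattern laid down by
		 * mmc_test_prepare_write(), proving nothing beyond the
		 * transfer was overwritten.
		 */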
		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}

/*******************************************************************/
/* Tests */
/*******************************************************************/

struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};
983 
984 static int mmc_test_basic_write(struct mmc_test_card *test)
985 {
986  int ret;
987  struct scatterlist sg;
988 
989  ret = mmc_test_set_blksize(test, 512);
990  if (ret)
991  return ret;
992 
993  sg_init_one(&sg, test->buffer, 512);
994 
995  ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
996  if (ret)
997  return ret;
998 
999  return 0;
1000 }
1001 
1002 static int mmc_test_basic_read(struct mmc_test_card *test)
1003 {
1004  int ret;
1005  struct scatterlist sg;
1006 
1007  ret = mmc_test_set_blksize(test, 512);
1008  if (ret)
1009  return ret;
1010 
1011  sg_init_one(&sg, test->buffer, 512);
1012 
1013  ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
1014  if (ret)
1015  return ret;
1016 
1017  return 0;
1018 }
1019 
1020 static int mmc_test_verify_write(struct mmc_test_card *test)
1021 {
1022  int ret;
1023  struct scatterlist sg;
1024 
1025  sg_init_one(&sg, test->buffer, 512);
1026 
1027  ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1028  if (ret)
1029  return ret;
1030 
1031  return 0;
1032 }
1033 
1034 static int mmc_test_verify_read(struct mmc_test_card *test)
1035 {
1036  int ret;
1037  struct scatterlist sg;
1038 
1039  sg_init_one(&sg, test->buffer, 512);
1040 
1041  ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1042  if (ret)
1043  return ret;
1044 
1045  return 0;
1046 }
1047 
1048 static int mmc_test_multi_write(struct mmc_test_card *test)
1049 {
1050  int ret;
1051  unsigned int size;
1052  struct scatterlist sg;
1053 
1054  if (test->card->host->max_blk_count == 1)
1055  return RESULT_UNSUP_HOST;
1056 
1057  size = PAGE_SIZE * 2;
1058  size = min(size, test->card->host->max_req_size);
1059  size = min(size, test->card->host->max_seg_size);
1060  size = min(size, test->card->host->max_blk_count * 512);
1061 
1062  if (size < 1024)
1063  return RESULT_UNSUP_HOST;
1064 
1065  sg_init_one(&sg, test->buffer, size);
1066 
1067  ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1068  if (ret)
1069  return ret;
1070 
1071  return 0;
1072 }
1073 
1074 static int mmc_test_multi_read(struct mmc_test_card *test)
1075 {
1076  int ret;
1077  unsigned int size;
1078  struct scatterlist sg;
1079 
1080  if (test->card->host->max_blk_count == 1)
1081  return RESULT_UNSUP_HOST;
1082 
1083  size = PAGE_SIZE * 2;
1084  size = min(size, test->card->host->max_req_size);
1085  size = min(size, test->card->host->max_seg_size);
1086  size = min(size, test->card->host->max_blk_count * 512);
1087 
1088  if (size < 1024)
1089  return RESULT_UNSUP_HOST;
1090 
1091  sg_init_one(&sg, test->buffer, size);
1092 
1093  ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1094  if (ret)
1095  return ret;
1096 
1097  return 0;
1098 }
1099 
1100 static int mmc_test_pow2_write(struct mmc_test_card *test)
1101 {
1102  int ret, i;
1103  struct scatterlist sg;
1104 
1105  if (!test->card->csd.write_partial)
1106  return RESULT_UNSUP_CARD;
1107 
1108  for (i = 1; i < 512;i <<= 1) {
1109  sg_init_one(&sg, test->buffer, i);
1110  ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1111  if (ret)
1112  return ret;
1113  }
1114 
1115  return 0;
1116 }
1117 
1118 static int mmc_test_pow2_read(struct mmc_test_card *test)
1119 {
1120  int ret, i;
1121  struct scatterlist sg;
1122 
1123  if (!test->card->csd.read_partial)
1124  return RESULT_UNSUP_CARD;
1125 
1126  for (i = 1; i < 512;i <<= 1) {
1127  sg_init_one(&sg, test->buffer, i);
1128  ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1129  if (ret)
1130  return ret;
1131  }
1132 
1133  return 0;
1134 }
1135 
1136 static int mmc_test_weird_write(struct mmc_test_card *test)
1137 {
1138  int ret, i;
1139  struct scatterlist sg;
1140 
1141  if (!test->card->csd.write_partial)
1142  return RESULT_UNSUP_CARD;
1143 
1144  for (i = 3; i < 512;i += 7) {
1145  sg_init_one(&sg, test->buffer, i);
1146  ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1147  if (ret)
1148  return ret;
1149  }
1150 
1151  return 0;
1152 }
1153 
1154 static int mmc_test_weird_read(struct mmc_test_card *test)
1155 {
1156  int ret, i;
1157  struct scatterlist sg;
1158 
1159  if (!test->card->csd.read_partial)
1160  return RESULT_UNSUP_CARD;
1161 
1162  for (i = 3; i < 512;i += 7) {
1163  sg_init_one(&sg, test->buffer, i);
1164  ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1165  if (ret)
1166  return ret;
1167  }
1168 
1169  return 0;
1170 }
1171 
1172 static int mmc_test_align_write(struct mmc_test_card *test)
1173 {
1174  int ret, i;
1175  struct scatterlist sg;
1176 
1177  for (i = 1;i < 4;i++) {
1178  sg_init_one(&sg, test->buffer + i, 512);
1179  ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1180  if (ret)
1181  return ret;
1182  }
1183 
1184  return 0;
1185 }
1186 
1187 static int mmc_test_align_read(struct mmc_test_card *test)
1188 {
1189  int ret, i;
1190  struct scatterlist sg;
1191 
1192  for (i = 1;i < 4;i++) {
1193  sg_init_one(&sg, test->buffer + i, 512);
1194  ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1195  if (ret)
1196  return ret;
1197  }
1198 
1199  return 0;
1200 }
1201 
1202 static int mmc_test_align_multi_write(struct mmc_test_card *test)
1203 {
1204  int ret, i;
1205  unsigned int size;
1206  struct scatterlist sg;
1207 
1208  if (test->card->host->max_blk_count == 1)
1209  return RESULT_UNSUP_HOST;
1210 
1211  size = PAGE_SIZE * 2;
1212  size = min(size, test->card->host->max_req_size);
1213  size = min(size, test->card->host->max_seg_size);
1214  size = min(size, test->card->host->max_blk_count * 512);
1215 
1216  if (size < 1024)
1217  return RESULT_UNSUP_HOST;
1218 
1219  for (i = 1;i < 4;i++) {
1220  sg_init_one(&sg, test->buffer + i, size);
1221  ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1222  if (ret)
1223  return ret;
1224  }
1225 
1226  return 0;
1227 }
1228 
1229 static int mmc_test_align_multi_read(struct mmc_test_card *test)
1230 {
1231  int ret, i;
1232  unsigned int size;
1233  struct scatterlist sg;
1234 
1235  if (test->card->host->max_blk_count == 1)
1236  return RESULT_UNSUP_HOST;
1237 
1238  size = PAGE_SIZE * 2;
1239  size = min(size, test->card->host->max_req_size);
1240  size = min(size, test->card->host->max_seg_size);
1241  size = min(size, test->card->host->max_blk_count * 512);
1242 
1243  if (size < 1024)
1244  return RESULT_UNSUP_HOST;
1245 
1246  for (i = 1;i < 4;i++) {
1247  sg_init_one(&sg, test->buffer + i, size);
1248  ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1249  if (ret)
1250  return ret;
1251  }
1252 
1253  return 0;
1254 }
1255 
1256 static int mmc_test_xfersize_write(struct mmc_test_card *test)
1257 {
1258  int ret;
1259 
1260  ret = mmc_test_set_blksize(test, 512);
1261  if (ret)
1262  return ret;
1263 
1264  ret = mmc_test_broken_transfer(test, 1, 512, 1);
1265  if (ret)
1266  return ret;
1267 
1268  return 0;
1269 }
1270 
1271 static int mmc_test_xfersize_read(struct mmc_test_card *test)
1272 {
1273  int ret;
1274 
1275  ret = mmc_test_set_blksize(test, 512);
1276  if (ret)
1277  return ret;
1278 
1279  ret = mmc_test_broken_transfer(test, 1, 512, 0);
1280  if (ret)
1281  return ret;
1282 
1283  return 0;
1284 }
1285 
1286 static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1287 {
1288  int ret;
1289 
1290  if (test->card->host->max_blk_count == 1)
1291  return RESULT_UNSUP_HOST;
1292 
1293  ret = mmc_test_set_blksize(test, 512);
1294  if (ret)
1295  return ret;
1296 
1297  ret = mmc_test_broken_transfer(test, 2, 512, 1);
1298  if (ret)
1299  return ret;
1300 
1301  return 0;
1302 }
1303 
1304 static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1305 {
1306  int ret;
1307 
1308  if (test->card->host->max_blk_count == 1)
1309  return RESULT_UNSUP_HOST;
1310 
1311  ret = mmc_test_set_blksize(test, 512);
1312  if (ret)
1313  return ret;
1314 
1315  ret = mmc_test_broken_transfer(test, 2, 512, 0);
1316  if (ret)
1317  return ret;
1318 
1319  return 0;
1320 }
1321 
1322 #ifdef CONFIG_HIGHMEM
1323 
1324 static int mmc_test_write_high(struct mmc_test_card *test)
1325 {
1326  int ret;
1327  struct scatterlist sg;
1328 
1329  sg_init_table(&sg, 1);
1330  sg_set_page(&sg, test->highmem, 512, 0);
1331 
1332  ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1333  if (ret)
1334  return ret;
1335 
1336  return 0;
1337 }
1338 
1339 static int mmc_test_read_high(struct mmc_test_card *test)
1340 {
1341  int ret;
1342  struct scatterlist sg;
1343 
1344  sg_init_table(&sg, 1);
1345  sg_set_page(&sg, test->highmem, 512, 0);
1346 
1347  ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1348  if (ret)
1349  return ret;
1350 
1351  return 0;
1352 }
1353 
1354 static int mmc_test_multi_write_high(struct mmc_test_card *test)
1355 {
1356  int ret;
1357  unsigned int size;
1358  struct scatterlist sg;
1359 
1360  if (test->card->host->max_blk_count == 1)
1361  return RESULT_UNSUP_HOST;
1362 
1363  size = PAGE_SIZE * 2;
1364  size = min(size, test->card->host->max_req_size);
1365  size = min(size, test->card->host->max_seg_size);
1366  size = min(size, test->card->host->max_blk_count * 512);
1367 
1368  if (size < 1024)
1369  return RESULT_UNSUP_HOST;
1370 
1371  sg_init_table(&sg, 1);
1372  sg_set_page(&sg, test->highmem, size, 0);
1373 
1374  ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1375  if (ret)
1376  return ret;
1377 
1378  return 0;
1379 }
1380 
1381 static int mmc_test_multi_read_high(struct mmc_test_card *test)
1382 {
1383  int ret;
1384  unsigned int size;
1385  struct scatterlist sg;
1386 
1387  if (test->card->host->max_blk_count == 1)
1388  return RESULT_UNSUP_HOST;
1389 
1390  size = PAGE_SIZE * 2;
1391  size = min(size, test->card->host->max_req_size);
1392  size = min(size, test->card->host->max_seg_size);
1393  size = min(size, test->card->host->max_blk_count * 512);
1394 
1395  if (size < 1024)
1396  return RESULT_UNSUP_HOST;
1397 
1398  sg_init_table(&sg, 1);
1399  sg_set_page(&sg, test->highmem, size, 0);
1400 
1401  ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1402  if (ret)
1403  return ret;
1404 
1405  return 0;
1406 }
1407 
1408 #else
1409 
1410 static int mmc_test_no_highmem(struct mmc_test_card *test)
1411 {
1412  pr_info("%s: Highmem not configured - test skipped\n",
1413  mmc_hostname(test->card->host));
1414  return 0;
1415 }
1416 
1417 #endif /* CONFIG_HIGHMEM */
1418 
1419 /*
1420  * Map sz bytes so that it can be transferred.
1421  */
1422 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1423  int max_scatter, int min_sg_len)
1424 {
1425  struct mmc_test_area *t = &test->area;
1426  int err;
1427 
1428  t->blocks = sz >> 9;
1429 
1430  if (max_scatter) {
1431  err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1432  t->max_segs, t->max_seg_sz,
1433  &t->sg_len);
1434  } else {
1435  err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1436  t->max_seg_sz, &t->sg_len, min_sg_len);
1437  }
1438  if (err)
1439  pr_info("%s: Failed to map sg list\n",
1440  mmc_hostname(test->card->host));
1441  return err;
1442 }
1443 
1444 /*
1445  * Transfer bytes mapped by mmc_test_area_map().
1446  */
1447 static int mmc_test_area_transfer(struct mmc_test_card *test,
1448  unsigned int dev_addr, int write)
1449 {
1450  struct mmc_test_area *t = &test->area;
1451 
1452  return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1453  t->blocks, 512, write);
1454 }
1455 
1456 /*
1457  * Map and transfer bytes for multiple transfers.
1458  */
1459 static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1460  unsigned int dev_addr, int write,
1461  int max_scatter, int timed, int count,
1462  bool nonblock, int min_sg_len)
1463 {
1464  struct timespec ts1, ts2;
1465  int ret = 0;
1466  int i;
1467  struct mmc_test_area *t = &test->area;
1468 
1469  /*
1470  * In the case of a maximally scattered transfer, the maximum transfer
1471  * size is further limited by using PAGE_SIZE segments.
1472  */
1473  if (max_scatter) {
1474  struct mmc_test_area *t = &test->area;
1475  unsigned long max_tfr;
1476 
1477  if (t->max_seg_sz >= PAGE_SIZE)
1478  max_tfr = t->max_segs * PAGE_SIZE;
1479  else
1480  max_tfr = t->max_segs * t->max_seg_sz;
1481  if (sz > max_tfr)
1482  sz = max_tfr;
1483  }
1484 
1485  ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
1486  if (ret)
1487  return ret;
1488 
1489  if (timed)
1490  getnstimeofday(&ts1);
1491  if (nonblock)
1492  ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
1493  dev_addr, t->blocks, 512, write, count);
1494  else
1495  for (i = 0; i < count && ret == 0; i++) {
1496  ret = mmc_test_area_transfer(test, dev_addr, write);
1497  dev_addr += sz >> 9;
1498  }
1499 
1500  if (ret)
1501  return ret;
1502 
1503  if (timed)
1504  getnstimeofday(&ts2);
1505 
1506  if (timed)
1507  mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
1508 
1509  return 0;
1510 }
1511 
1512 static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1513  unsigned int dev_addr, int write, int max_scatter,
1514  int timed)
1515 {
1516  return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
1517  timed, 1, false, 0);
1518 }
1519 
1520 /*
1521  * Write the test area entirely.
1522  */
1523 static int mmc_test_area_fill(struct mmc_test_card *test)
1524 {
1525  struct mmc_test_area *t = &test->area;
1526 
1527  return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1528 }
1529 
1530 /*
1531  * Erase the test area entirely.
1532  */
1533 static int mmc_test_area_erase(struct mmc_test_card *test)
1534 {
1535  struct mmc_test_area *t = &test->area;
1536 
1537  if (!mmc_can_erase(test->card))
1538  return 0;
1539 
1540  return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1541  MMC_ERASE_ARG);
1542 }
1543 
1544 /*
1545  * Cleanup struct mmc_test_area.
1546  */
1547 static int mmc_test_area_cleanup(struct mmc_test_card *test)
1548 {
1549  struct mmc_test_area *t = &test->area;
1550 
1551  kfree(t->sg);
1552  mmc_test_free_mem(t->mem);
1553 
1554  return 0;
1555 }

/*
 * Initialize an area for testing large transfers.  The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization).  Optionally, the area is erased
 * (if the card supports it) which may improve write performance.  Optionally,
 * the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
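	/*
	 * The area size is kept a multiple of the preferred erase size,
	 * grown to at least 4 MiB and then trimmed back toward
	 * TEST_AREA_MAX_SIZE.
	 */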
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;
	t->max_seg_sz -= t->max_seg_sz % 512;

	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once.  Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}

/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}

/*
 * Test best-case performance.  Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list.  This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
				max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}

/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}

static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}

static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}

/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

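/*
 * Pseudo-random numbers come from the classic linear congruential generator
 * used as the example rand() in the C standard (multiplier 1103515245,
 * increment 12345); the static seed keeps runs reproducible.
 */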
static unsigned int rnd_next = 1;

static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}

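/*
 * Issue random reads or writes of size sz for about ten seconds, scattering
 * the addresses across erase groups in the second quarter of the card.
 */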
static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;

	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;
	range2 = range1 / ssz;

	getnstimeofday(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		getnstimeofday(&ts2);
		ts = timespec_sub(ts2, ts1);
		if (ts.tv_sec >= 10)
			break;
		ea = mmc_test_rnd_num(range1);
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
	struct mmc_test_area *t = &test->area;
	unsigned int next;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		/*
		 * When writing, try to get more consistent results by running
		 * the test twice with exactly the same I/O but outputting the
		 * results only for the 2nd run.
		 */
		if (write) {
			next = rnd_next;
			ret = mmc_test_rnd_perf(test, write, 0, sz);
			if (ret)
				return ret;
			rnd_next = next;
		}
		ret = mmc_test_rnd_perf(test, write, 1, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	if (write) {
		next = rnd_next;
		ret = mmc_test_rnd_perf(test, write, 0, sz);
		if (ret)
			return ret;
		rnd_next = next;
	}
	return mmc_test_rnd_perf(test, write, 1, sz);
}

/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 0);
}

/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 1);
}

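/*
 * Time tot_sz bytes of sequential I/O issued as maximum-sized requests,
 * starting from a quarter of the way into the card.
 */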
static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to 64MiB boundary */

	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	getnstimeofday(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}

static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	int ret, i;

	for (i = 0; i < 10; i++) {
		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 5; i++) {
		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 3; i++) {
		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 0);
}

/*
 * Large sequential write performance.
 */
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 1);
}

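/*
 * Run one multiple-transfer test: 'size' bytes are transferred in requests
 * of 'reqsize' bytes each, optionally erasing the region first when
 * tdata->prepare asks for it.
 */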
static int mmc_test_rw_multiple(struct mmc_test_card *test,
				struct mmc_test_multiple_rw *tdata,
				unsigned int reqsize, unsigned int size,
				int min_sg_len)
{
	unsigned int dev_addr;
	struct mmc_test_area *t = &test->area;
	int ret = 0;

	/* Set up test area */
	if (size > mmc_test_capacity(test->card) / 2 * 512)
		size = mmc_test_capacity(test->card) / 2 * 512;
	if (reqsize > t->max_tfr)
		reqsize = t->max_tfr;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if ((dev_addr & 0xffff0000))
		dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
	else
		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
	if (!dev_addr)
		goto err;

	if (reqsize > size)
		return 0;

	/* Prepare test area */
	if (mmc_can_erase(test->card) &&
	    tdata->prepare & MMC_TEST_PREP_ERASE) {
		ret = mmc_erase(test->card, dev_addr,
				size / 512, MMC_SECURE_ERASE_ARG);
		if (ret)
			ret = mmc_erase(test->card, dev_addr,
					size / 512, MMC_ERASE_ARG);
		if (ret)
			goto err;
	}

	/* Run test */
	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
				   tdata->do_write, 0, 1, size / reqsize,
				   tdata->do_nonblock_req, min_sg_len);
	if (ret)
		goto err;

	return ret;
 err:
	pr_info("[%s] error\n", __func__);
	return ret;
}

static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
				     struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;
	void *pre_req = test->card->host->ops->pre_req;
	void *post_req = test->card->host->ops->post_req;

	if (rw->do_nonblock_req &&
	    ((!pre_req && post_req) || (pre_req && !post_req))) {
		pr_info("error: only one of pre/post is defined\n");
		return -EINVAL;
	}

	for (i = 0 ; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
		if (ret)
			break;
	}
	return ret;
}

static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
				       struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;

	for (i = 0 ; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
					   rw->sg_len[i]);
		if (ret)
			break;
	}
	return ret;
}

/*
 * Multiple blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
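	/* Request sizes step from 1 << 12 (4 KiB) up to 1 << 22 (4 MiB). */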
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
};

/*
 * Multiple non-blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple non-blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
2264  struct mmc_test_multiple_rw test_data = {
2265  .sg_len = sg_len,
2266  .size = TEST_AREA_MAX_SIZE,
2267  .len = ARRAY_SIZE(sg_len),
2268  .do_write = true,
2269  .do_nonblock_req = false,
2270  .prepare = MMC_TEST_PREP_ERASE,
2271  };
2272 
2273  return mmc_test_rw_multiple_sg_len(test, &test_data);
2274 }
2275 
2276 /*
2277  * Multiple non-blocking write 1 to 512 sg elements
2278  */
2279 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2280 {
2281  unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2282  1 << 7, 1 << 8, 1 << 9};
2283  struct mmc_test_multiple_rw test_data = {
2284  .sg_len = sg_len,
2285  .size = TEST_AREA_MAX_SIZE,
2286  .len = ARRAY_SIZE(sg_len),
2287  .do_write = true,
2288  .do_nonblock_req = true,
2289  .prepare = MMC_TEST_PREP_ERASE,
2290  };
2291 
2292  return mmc_test_rw_multiple_sg_len(test, &test_data);
2293 }
2294 
2295 /*
2296  * Multiple blocking read 1 to 512 sg elements
2297  */
2298 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2299 {
2300  unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2301  1 << 7, 1 << 8, 1 << 9};
2302  struct mmc_test_multiple_rw test_data = {
2303  .sg_len = sg_len,
2304  .size = TEST_AREA_MAX_SIZE,
2305  .len = ARRAY_SIZE(sg_len),
2306  .do_write = false,
2307  .do_nonblock_req = false,
2308  .prepare = MMC_TEST_PREP_NONE,
2309  };
2310 
2311  return mmc_test_rw_multiple_sg_len(test, &test_data);
2312 }
2313 
2314 /*
2315  * Multiple non-blocking read 1 to 512 sg elements
2316  */
2317 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2318 {
2319  unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2320  1 << 7, 1 << 8, 1 << 9};
2321  struct mmc_test_multiple_rw test_data = {
2322  .sg_len = sg_len,
2323  .size = TEST_AREA_MAX_SIZE,
2324  .len = ARRAY_SIZE(sg_len),
2325  .do_write = false,
2326  .do_nonblock_req = true,
2327  .prepare = MMC_TEST_PREP_NONE,
2328  };
2329 
2330  return mmc_test_rw_multiple_sg_len(test, &test_data);
2331 }
2332 
2333 /*
2334  * eMMC hardware reset.
2335  */
2336 static int mmc_test_hw_reset(struct mmc_test_card *test)
2337 {
2338  struct mmc_card *card = test->card;
2339  struct mmc_host *host = card->host;
2340  int err;
2341 
2342  err = mmc_hw_reset_check(host);
2343  if (!err)
2344  return RESULT_OK;
2345 
2346  if (err == -ENOSYS)
2347  return RESULT_FAIL;
2348 
2349  if (err != -EOPNOTSUPP)
2350  return err;
2351 
2352  if (!mmc_can_reset(card))
2353  return RESULT_UNSUP_CARD;
2354 
2355  return RESULT_UNSUP_HOST;
2356 }
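/*
 * Mapping of the mmc_hw_reset_check() return codes above: 0 means the
 * card was observed to reset (OK); -ENOSYS means the reset was issued
 * but the card did not appear to reset (FAIL); -EOPNOTSUPP is narrowed
 * down to either a card that cannot be reset or a host without reset
 * support. Any other error is passed straight through.
 */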
2357 
2358 static const struct mmc_test_case mmc_test_cases[] = {
2359  {
2360  .name = "Basic write (no data verification)",
2361  .run = mmc_test_basic_write,
2362  },
2363 
2364  {
2365  .name = "Basic read (no data verification)",
2366  .run = mmc_test_basic_read,
2367  },
2368 
2369  {
2370  .name = "Basic write (with data verification)",
2371  .prepare = mmc_test_prepare_write,
2372  .run = mmc_test_verify_write,
2373  .cleanup = mmc_test_cleanup,
2374  },
2375 
2376  {
2377  .name = "Basic read (with data verification)",
2378  .prepare = mmc_test_prepare_read,
2379  .run = mmc_test_verify_read,
2380  .cleanup = mmc_test_cleanup,
2381  },
2382 
2383  {
2384  .name = "Multi-block write",
2385  .prepare = mmc_test_prepare_write,
2386  .run = mmc_test_multi_write,
2387  .cleanup = mmc_test_cleanup,
2388  },
2389 
2390  {
2391  .name = "Multi-block read",
2392  .prepare = mmc_test_prepare_read,
2393  .run = mmc_test_multi_read,
2394  .cleanup = mmc_test_cleanup,
2395  },
2396 
2397  {
2398  .name = "Power of two block writes",
2399  .prepare = mmc_test_prepare_write,
2400  .run = mmc_test_pow2_write,
2401  .cleanup = mmc_test_cleanup,
2402  },
2403 
2404  {
2405  .name = "Power of two block reads",
2406  .prepare = mmc_test_prepare_read,
2407  .run = mmc_test_pow2_read,
2408  .cleanup = mmc_test_cleanup,
2409  },
2410 
2411  {
2412  .name = "Weird sized block writes",
2413  .prepare = mmc_test_prepare_write,
2414  .run = mmc_test_weird_write,
2415  .cleanup = mmc_test_cleanup,
2416  },
2417 
2418  {
2419  .name = "Weird sized block reads",
2420  .prepare = mmc_test_prepare_read,
2421  .run = mmc_test_weird_read,
2422  .cleanup = mmc_test_cleanup,
2423  },
2424 
2425  {
2426  .name = "Badly aligned write",
2427  .prepare = mmc_test_prepare_write,
2428  .run = mmc_test_align_write,
2429  .cleanup = mmc_test_cleanup,
2430  },
2431 
2432  {
2433  .name = "Badly aligned read",
2434  .prepare = mmc_test_prepare_read,
2435  .run = mmc_test_align_read,
2436  .cleanup = mmc_test_cleanup,
2437  },
2438 
2439  {
2440  .name = "Badly aligned multi-block write",
2441  .prepare = mmc_test_prepare_write,
2442  .run = mmc_test_align_multi_write,
2443  .cleanup = mmc_test_cleanup,
2444  },
2445 
2446  {
2447  .name = "Badly aligned multi-block read",
2448  .prepare = mmc_test_prepare_read,
2449  .run = mmc_test_align_multi_read,
2450  .cleanup = mmc_test_cleanup,
2451  },
2452 
2453  {
2454  .name = "Correct xfer_size at write (start failure)",
2455  .run = mmc_test_xfersize_write,
2456  },
2457 
2458  {
2459  .name = "Correct xfer_size at read (start failure)",
2460  .run = mmc_test_xfersize_read,
2461  },
2462 
2463  {
2464  .name = "Correct xfer_size at write (midway failure)",
2465  .run = mmc_test_multi_xfersize_write,
2466  },
2467 
2468  {
2469  .name = "Correct xfer_size at read (midway failure)",
2470  .run = mmc_test_multi_xfersize_read,
2471  },
2472 
2473 #ifdef CONFIG_HIGHMEM
2474 
2475  {
2476  .name = "Highmem write",
2477  .prepare = mmc_test_prepare_write,
2478  .run = mmc_test_write_high,
2479  .cleanup = mmc_test_cleanup,
2480  },
2481 
2482  {
2483  .name = "Highmem read",
2484  .prepare = mmc_test_prepare_read,
2485  .run = mmc_test_read_high,
2486  .cleanup = mmc_test_cleanup,
2487  },
2488 
2489  {
2490  .name = "Multi-block highmem write",
2491  .prepare = mmc_test_prepare_write,
2492  .run = mmc_test_multi_write_high,
2493  .cleanup = mmc_test_cleanup,
2494  },
2495 
2496  {
2497  .name = "Multi-block highmem read",
2498  .prepare = mmc_test_prepare_read,
2499  .run = mmc_test_multi_read_high,
2500  .cleanup = mmc_test_cleanup,
2501  },
2502 
2503 #else
2504 
2505  {
2506  .name = "Highmem write",
2507  .run = mmc_test_no_highmem,
2508  },
2509 
2510  {
2511  .name = "Highmem read",
2512  .run = mmc_test_no_highmem,
2513  },
2514 
2515  {
2516  .name = "Multi-block highmem write",
2517  .run = mmc_test_no_highmem,
2518  },
2519 
2520  {
2521  .name = "Multi-block highmem read",
2522  .run = mmc_test_no_highmem,
2523  },
2524 
2525 #endif /* CONFIG_HIGHMEM */
2526 
2527  {
2528  .name = "Best-case read performance",
2529  .prepare = mmc_test_area_prepare_fill,
2530  .run = mmc_test_best_read_performance,
2531  .cleanup = mmc_test_area_cleanup,
2532  },
2533 
2534  {
2535  .name = "Best-case write performance",
2536  .prepare = mmc_test_area_prepare_erase,
2537  .run = mmc_test_best_write_performance,
2538  .cleanup = mmc_test_area_cleanup,
2539  },
2540 
2541  {
2542  .name = "Best-case read performance into scattered pages",
2543  .prepare = mmc_test_area_prepare_fill,
2544  .run = mmc_test_best_read_perf_max_scatter,
2545  .cleanup = mmc_test_area_cleanup,
2546  },
2547 
2548  {
2549  .name = "Best-case write performance from scattered pages",
2550  .prepare = mmc_test_area_prepare_erase,
2551  .run = mmc_test_best_write_perf_max_scatter,
2552  .cleanup = mmc_test_area_cleanup,
2553  },
2554 
2555  {
2556  .name = "Single read performance by transfer size",
2557  .prepare = mmc_test_area_prepare_fill,
2558  .run = mmc_test_profile_read_perf,
2559  .cleanup = mmc_test_area_cleanup,
2560  },
2561 
2562  {
2563  .name = "Single write performance by transfer size",
2564  .prepare = mmc_test_area_prepare,
2565  .run = mmc_test_profile_write_perf,
2566  .cleanup = mmc_test_area_cleanup,
2567  },
2568 
2569  {
2570  .name = "Single trim performance by transfer size",
2571  .prepare = mmc_test_area_prepare_fill,
2572  .run = mmc_test_profile_trim_perf,
2573  .cleanup = mmc_test_area_cleanup,
2574  },
2575 
2576  {
2577  .name = "Consecutive read performance by transfer size",
2578  .prepare = mmc_test_area_prepare_fill,
2579  .run = mmc_test_profile_seq_read_perf,
2580  .cleanup = mmc_test_area_cleanup,
2581  },
2582 
2583  {
2584  .name = "Consecutive write performance by transfer size",
2585  .prepare = mmc_test_area_prepare,
2586  .run = mmc_test_profile_seq_write_perf,
2587  .cleanup = mmc_test_area_cleanup,
2588  },
2589 
2590  {
2591  .name = "Consecutive trim performance by transfer size",
2592  .prepare = mmc_test_area_prepare,
2593  .run = mmc_test_profile_seq_trim_perf,
2594  .cleanup = mmc_test_area_cleanup,
2595  },
2596 
2597  {
2598  .name = "Random read performance by transfer size",
2599  .prepare = mmc_test_area_prepare,
2600  .run = mmc_test_random_read_perf,
2601  .cleanup = mmc_test_area_cleanup,
2602  },
2603 
2604  {
2605  .name = "Random write performance by transfer size",
2606  .prepare = mmc_test_area_prepare,
2607  .run = mmc_test_random_write_perf,
2608  .cleanup = mmc_test_area_cleanup,
2609  },
2610 
2611  {
2612  .name = "Large sequential read into scattered pages",
2613  .prepare = mmc_test_area_prepare,
2614  .run = mmc_test_large_seq_read_perf,
2615  .cleanup = mmc_test_area_cleanup,
2616  },
2617 
2618  {
2619  .name = "Large sequential write from scattered pages",
2620  .prepare = mmc_test_area_prepare,
2621  .run = mmc_test_large_seq_write_perf,
2622  .cleanup = mmc_test_area_cleanup,
2623  },
2624 
2625  {
2626  .name = "Write performance with blocking req 4k to 4MB",
2627  .prepare = mmc_test_area_prepare,
2628  .run = mmc_test_profile_mult_write_blocking_perf,
2629  .cleanup = mmc_test_area_cleanup,
2630  },
2631 
2632  {
2633  .name = "Write performance with non-blocking req 4k to 4MB",
2634  .prepare = mmc_test_area_prepare,
2635  .run = mmc_test_profile_mult_write_nonblock_perf,
2636  .cleanup = mmc_test_area_cleanup,
2637  },
2638 
2639  {
2640  .name = "Read performance with blocking req 4k to 4MB",
2641  .prepare = mmc_test_area_prepare,
2642  .run = mmc_test_profile_mult_read_blocking_perf,
2643  .cleanup = mmc_test_area_cleanup,
2644  },
2645 
2646  {
2647  .name = "Read performance with non-blocking req 4k to 4MB",
2648  .prepare = mmc_test_area_prepare,
2649  .run = mmc_test_profile_mult_read_nonblock_perf,
2650  .cleanup = mmc_test_area_cleanup,
2651  },
2652 
2653  {
2654  .name = "Write performance blocking req 1 to 512 sg elems",
2655  .prepare = mmc_test_area_prepare,
2656  .run = mmc_test_profile_sglen_wr_blocking_perf,
2657  .cleanup = mmc_test_area_cleanup,
2658  },
2659 
2660  {
2661  .name = "Write performance non-blocking req 1 to 512 sg elems",
2662  .prepare = mmc_test_area_prepare,
2663  .run = mmc_test_profile_sglen_wr_nonblock_perf,
2664  .cleanup = mmc_test_area_cleanup,
2665  },
2666 
2667  {
2668  .name = "Read performance blocking req 1 to 512 sg elems",
2669  .prepare = mmc_test_area_prepare,
2670  .run = mmc_test_profile_sglen_r_blocking_perf,
2671  .cleanup = mmc_test_area_cleanup,
2672  },
2673 
2674  {
2675  .name = "Read performance non-blocking req 1 to 512 sg elems",
2676  .prepare = mmc_test_area_prepare,
2677  .run = mmc_test_profile_sglen_r_nonblock_perf,
2678  .cleanup = mmc_test_area_cleanup,
2679  },
2680 
2681  {
2682  .name = "eMMC hardware reset",
2683  .run = mmc_test_hw_reset,
2684  },
2685 };
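/*
 * Illustration (not part of this driver): the table above is driven
 * from user space through the debugfs "test" file implemented by
 * mtf_test_write() below. Writing "0" runs every case; writing "N"
 * runs the Nth case, 1-based, matching the "testlist" numbering.
 * A minimal stand-alone sketch follows; the debugfs mount point and
 * the card name "mmc0:0001" are assumptions that vary between systems:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/debug/mmc0/mmc0:0001/test",
 *			      O_WRONLY);
 *
 *		if (fd < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *		// Run test case 1, "Basic write (no data verification)"
 *		if (write(fd, "1", 1) != 1)
 *			perror("write");
 *		close(fd);
 *		return 0;
 *	}
 */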
2686 
2687 static DEFINE_MUTEX(mmc_test_lock);
2688 
2689 static LIST_HEAD(mmc_test_result);
2690 
2691 static void mmc_test_run(struct mmc_test_card *test, int testcase)
2692 {
2693  int i, ret;
2694 
2695  pr_info("%s: Starting tests of card %s...\n",
2696  mmc_hostname(test->card->host), mmc_card_id(test->card));
2697 
2698  mmc_claim_host(test->card->host);
2699 
2700  for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
2701  struct mmc_test_general_result *gr;
2702 
2703  if (testcase && ((i + 1) != testcase))
2704  continue;
2705 
2706  pr_info("%s: Test case %d. %s...\n",
2707  mmc_hostname(test->card->host), i + 1,
2708  mmc_test_cases[i].name);
2709 
2710  if (mmc_test_cases[i].prepare) {
2711  ret = mmc_test_cases[i].prepare(test);
2712  if (ret) {
2713  pr_info("%s: Result: Prepare "
2714  "stage failed! (%d)\n",
2715  mmc_hostname(test->card->host),
2716  ret);
2717  continue;
2718  }
2719  }
2720 
2721  gr = kzalloc(sizeof(struct mmc_test_general_result),
2722  GFP_KERNEL);
2723  if (gr) {
2724  INIT_LIST_HEAD(&gr->tr_lst);
2725 
2726  /* Record the data we already know */
2727  gr->card = test->card;
2728  gr->testcase = i;
2729 
2730  /* Append the container to the global result list */
2731  list_add_tail(&gr->link, &mmc_test_result);
2732 
2733  /*
2734  * Save a pointer to the newly created container in our
2735  * private structure.
2736  */
2737  test->gr = gr;
2738  }
2739 
2740  ret = mmc_test_cases[i].run(test);
2741  switch (ret) {
2742  case RESULT_OK:
2743  pr_info("%s: Result: OK\n",
2744  mmc_hostname(test->card->host));
2745  break;
2746  case RESULT_FAIL:
2747  pr_info("%s: Result: FAILED\n",
2748  mmc_hostname(test->card->host));
2749  break;
2750  case RESULT_UNSUP_HOST:
2751  pr_info("%s: Result: UNSUPPORTED "
2752  "(by host)\n",
2753  mmc_hostname(test->card->host));
2754  break;
2755  case RESULT_UNSUP_CARD:
2756  pr_info("%s: Result: UNSUPPORTED "
2757  "(by card)\n",
2758  mmc_hostname(test->card->host));
2759  break;
2760  default:
2761  pr_info("%s: Result: ERROR (%d)\n",
2762  mmc_hostname(test->card->host), ret);
2763  }
2764 
2765  /* Save the result */
2766  if (gr)
2767  gr->result = ret;
2768 
2769  if (mmc_test_cases[i].cleanup) {
2770  ret = mmc_test_cases[i].cleanup(test);
2771  if (ret) {
2772  pr_info("%s: Warning: Cleanup "
2773  "stage failed! (%d)\n",
2774  mmc_hostname(test->card->host),
2775  ret);
2776  }
2777  }
2778  }
2779 
2780  mmc_release_host(test->card->host);
2781 
2782  pr_info("%s: Tests completed.\n",
2783  mmc_hostname(test->card->host));
2784 }
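/*
 * To summarize the flow above: the host is claimed for the entire run;
 * for each selected case the optional prepare hook runs first, then a
 * result container is allocated (if that allocation fails the test
 * still runs, just without saving results), the test itself executes
 * and its verdict is logged, and the optional cleanup hook runs last.
 */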
2785 
2786 static void mmc_test_free_result(struct mmc_card *card)
2787 {
2788  struct mmc_test_general_result *gr, *grs;
2789 
2790  mutex_lock(&mmc_test_lock);
2791 
2792  list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2793  struct mmc_test_transfer_result *tr, *trs;
2794 
2795  if (card && gr->card != card)
2796  continue;
2797 
2798  list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2799  list_del(&tr->link);
2800  kfree(tr);
2801  }
2802 
2803  list_del(&gr->link);
2804  kfree(gr);
2805  }
2806 
2807  mutex_unlock(&mmc_test_lock);
2808 }
2809 
2810 static LIST_HEAD(mmc_test_file_test);
2811 
2812 static int mtf_test_show(struct seq_file *sf, void *data)
2813 {
2814  struct mmc_card *card = (struct mmc_card *)sf->private;
2815  struct mmc_test_general_result *gr;
2816 
2817  mutex_lock(&mmc_test_lock);
2818 
2819  list_for_each_entry(gr, &mmc_test_result, link) {
2820  struct mmc_test_transfer_result *tr;
2821 
2822  if (gr->card != card)
2823  continue;
2824 
2825  seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
2826 
2827  list_for_each_entry(tr, &gr->tr_lst, link) {
2828  seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
2829  tr->count, tr->sectors,
2830  (unsigned long)tr->ts.tv_sec,
2831  (unsigned long)tr->ts.tv_nsec,
2832  tr->rate, tr->iops / 100, tr->iops % 100);
2833  }
2834  }
2835 
2836  mutex_unlock(&mmc_test_lock);
2837 
2838  return 0;
2839 }
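/*
 * The output produced above is one "Test <n>: <result>" line per
 * recorded test, followed by one line per saved transfer of the form
 *
 *	<count> <sectors> <seconds>.<nanoseconds> <rate> <iops>
 *
 * where rate is in bytes per second and iops is stored scaled by 100,
 * hence the two decimal places in the final field.
 */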
2840 
2841 static int mtf_test_open(struct inode *inode, struct file *file)
2842 {
2843  return single_open(file, mtf_test_show, inode->i_private);
2844 }
2845 
2846 static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2847  size_t count, loff_t *pos)
2848 {
2849  struct seq_file *sf = (struct seq_file *)file->private_data;
2850  struct mmc_card *card = (struct mmc_card *)sf->private;
2851  struct mmc_test_card *test;
2852  char lbuf[12];
2853  long testcase;
2854 
2855  if (count >= sizeof(lbuf))
2856  return -EINVAL;
2857 
2858  if (copy_from_user(lbuf, buf, count))
2859  return -EFAULT;
2860  lbuf[count] = '\0';
2861 
2862  if (strict_strtol(lbuf, 10, &testcase))
2863  return -EINVAL;
2864 
2865  test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2866  if (!test)
2867  return -ENOMEM;
2868 
2869  /*
2870  * Remove all results associated with the given card, so that only
2871  * the data from the most recent run is kept.
2872  */
2873  mmc_test_free_result(card);
2874 
2875  test->card = card;
2876 
2877  test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
2878 #ifdef CONFIG_HIGHMEM
2879  test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2880 #endif
2881 
2882 #ifdef CONFIG_HIGHMEM
2883  if (test->buffer && test->highmem) {
2884 #else
2885  if (test->buffer) {
2886 #endif
2887  mutex_lock(&mmc_test_lock);
2888  mmc_test_run(test, testcase);
2889  mutex_unlock(&mmc_test_lock);
2890  }
2891 
2892 #ifdef CONFIG_HIGHMEM
2893  if (test->highmem) __free_pages(test->highmem, BUFFER_ORDER);
2894 #endif
2895  kfree(test->buffer);
2896  kfree(test);
2897 
2898  return count;
2899 }
2900 
2901 static const struct file_operations mmc_test_fops_test = {
2902  .open = mtf_test_open,
2903  .read = seq_read,
2904  .write = mtf_test_write,
2905  .llseek = seq_lseek,
2906  .release = single_release,
2907 };
2908 
2909 static int mtf_testlist_show(struct seq_file *sf, void *data)
2910 {
2911  int i;
2912 
2913  mutex_lock(&mmc_test_lock);
2914 
2915  for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
2916  seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);
2917 
2918  mutex_unlock(&mmc_test_lock);
2919 
2920  return 0;
2921 }
2922 
2923 static int mtf_testlist_open(struct inode *inode, struct file *file)
2924 {
2925  return single_open(file, mtf_testlist_show, inode->i_private);
2926 }
2927 
2928 static const struct file_operations mmc_test_fops_testlist = {
2929  .open = mtf_testlist_open,
2930  .read = seq_read,
2931  .llseek = seq_lseek,
2932  .release = single_release,
2933 };
2934 
2935 static void mmc_test_free_dbgfs_file(struct mmc_card *card)
2936 {
2937  struct mmc_test_dbgfs_file *df, *dfs;
2938 
2939  mutex_lock(&mmc_test_lock);
2940 
2941  list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
2942  if (card && df->card != card)
2943  continue;
2944  debugfs_remove(df->file);
2945  list_del(&df->link);
2946  kfree(df);
2947  }
2948 
2949  mutex_unlock(&mmc_test_lock);
2950 }
2951 
2952 static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
2953  const char *name, umode_t mode, const struct file_operations *fops)
2954 {
2955  struct dentry *file = NULL;
2956  struct mmc_test_dbgfs_file *df;
2957 
2958  if (card->debugfs_root)
2959  file = debugfs_create_file(name, mode, card->debugfs_root,
2960  card, fops);
2961 
2962  if (IS_ERR_OR_NULL(file)) {
2963  dev_err(&card->dev,
2964  "Can't create %s. Perhaps debugfs is disabled.\n",
2965  name);
2966  return -ENODEV;
2967  }
2968 
2969  df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
2970  if (!df) {
2971  debugfs_remove(file);
2972  dev_err(&card->dev,
2973  "Can't allocate memory for internal usage.\n");
2974  return -ENOMEM;
2975  }
2976 
2977  df->card = card;
2978  df->file = file;
2979 
2980  list_add(&df->link, &mmc_test_file_test);
2981  return 0;
2982 }
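/*
 * Each dentry created here is remembered on mmc_test_file_test so that
 * mmc_test_free_dbgfs_file() can debugfs_remove() it again when the
 * card disappears or the module is unloaded.
 */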
2983 
2984 static int mmc_test_register_dbgfs_file(struct mmc_card *card)
2985 {
2986  int ret;
2987 
2988  mutex_lock(&mmc_test_lock);
2989 
2990  ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
2991  &mmc_test_fops_test);
2992  if (ret)
2993  goto err;
2994 
2995  ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
2996  &mmc_test_fops_testlist);
2997  if (ret)
2998  goto err;
2999 
3000 err:
3001  mutex_unlock(&mmc_test_lock);
3002 
3003  return ret;
3004 }
3005 
3006 static int mmc_test_probe(struct mmc_card *card)
3007 {
3008  int ret;
3009 
3010  if (!mmc_card_mmc(card) && !mmc_card_sd(card))
3011  return -ENODEV;
3012 
3013  ret = mmc_test_register_dbgfs_file(card);
3014  if (ret)
3015  return ret;
3016 
3017  dev_info(&card->dev, "Card claimed for testing.\n");
3018 
3019  return 0;
3020 }
3021 
3022 static void mmc_test_remove(struct mmc_card *card)
3023 {
3024  mmc_test_free_result(card);
3025  mmc_test_free_dbgfs_file(card);
3026 }
3027 
3028 static struct mmc_driver mmc_driver = {
3029  .drv = {
3030  .name = "mmc_test",
3031  },
3032  .probe = mmc_test_probe,
3033  .remove = mmc_test_remove,
3034 };
3035 
3036 static int __init mmc_test_init(void)
3037 {
3038  return mmc_register_driver(&mmc_driver);
3039 }
3040 
3041 static void __exit mmc_test_exit(void)
3042 {
3043  /* Clear stale data in case a card is still plugged in */
3044  mmc_test_free_result(NULL);
3045  mmc_test_free_dbgfs_file(NULL);
3046 
3047  mmc_unregister_driver(&mmc_driver);
3048 }
3049 
3050 module_init(mmc_test_init);
3051 module_exit(mmc_test_exit);
3052 
3053 MODULE_LICENSE("GPL");
3054 MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
3055 MODULE_AUTHOR("Pierre Ossman");