Linux Kernel 3.7.1
dw_mmc.c
1 /*
2  * Synopsys DesignWare Multimedia Card Interface driver
3  * (Based on NXP driver for lpc 31xx)
4  *
5  * Copyright (C) 2009 NXP Semiconductors
6  * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  */
13 
14 #include <linux/blkdev.h>
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/stat.h>
28 #include <linux/delay.h>
29 #include <linux/irq.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/dw_mmc.h>
33 #include <linux/bitops.h>
34 #include <linux/regulator/consumer.h>
35 #include <linux/workqueue.h>
36 #include <linux/of.h>
37 
38 #include "dw_mmc.h"
39 
40 /* Common flag combinations */
41 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \
42  SDMMC_INT_HTO | SDMMC_INT_SBE | \
43  SDMMC_INT_EBE)
44 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
45  SDMMC_INT_RESP_ERR)
46 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
47  DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
48 #define DW_MCI_SEND_STATUS 1
49 #define DW_MCI_RECV_STATUS 2
50 #define DW_MCI_DMA_THRESHOLD 16
51 
52 #ifdef CONFIG_MMC_DW_IDMAC
53 struct idmac_desc {
54  u32 des0; /* Control Descriptor */
55 #define IDMAC_DES0_DIC BIT(1)
56 #define IDMAC_DES0_LD BIT(2)
57 #define IDMAC_DES0_FD BIT(3)
58 #define IDMAC_DES0_CH BIT(4)
59 #define IDMAC_DES0_ER BIT(5)
60 #define IDMAC_DES0_CES BIT(30)
61 #define IDMAC_DES0_OWN BIT(31)
62 
63  u32 des1; /* Buffer sizes */
64 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
65  ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
66 
67  u32 des2; /* buffer 1 physical address */
68 
69  u32 des3; /* buffer 2 physical address */
70 };
71 #endif /* CONFIG_MMC_DW_IDMAC */
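
/*
 * A minimal sketch of filling one idmac_desc by hand for a single 4 KiB
 * buffer; "dma_addr" stands in for an already-mapped DMA bus address and
 * the real driver does the equivalent in dw_mci_translate_sglist():
 *
 *	struct idmac_desc d = { 0 };
 *
 *	d.des0 = IDMAC_DES0_OWN | IDMAC_DES0_FD | IDMAC_DES0_LD;
 *	IDMAC_SET_BUFFER1_SIZE(&d, 4096);
 *	d.des2 = dma_addr;
 *
 * After the macro, des1 & 0x1fff == 0x1000. IDMAC_SET_BUFFER1_SIZE()
 * preserves bits [25:13] of des1 (the buffer 2 size) and writes only
 * bits [12:0], so a single buffer is capped at 8191 bytes.
 */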
72 
87 struct dw_mci_slot {
88  struct mmc_host *mmc;
89  struct dw_mci *host;
90 
91  u32 ctype;
92 
93  struct mmc_request *mrq;
94  struct list_head queue_node;
95 
96  unsigned int clock;
97  unsigned long flags;
98 #define DW_MMC_CARD_PRESENT 0
99 #define DW_MMC_CARD_NEED_INIT 1
100  int id;
101  int last_detect_state;
102 };
103 
104 #if defined(CONFIG_DEBUG_FS)
105 static int dw_mci_req_show(struct seq_file *s, void *v)
106 {
107  struct dw_mci_slot *slot = s->private;
108  struct mmc_request *mrq;
109  struct mmc_command *cmd;
110  struct mmc_command *stop;
111  struct mmc_data *data;
112 
113  /* Make sure we get a consistent snapshot */
114  spin_lock_bh(&slot->host->lock);
115  mrq = slot->mrq;
116 
117  if (mrq) {
118  cmd = mrq->cmd;
119  data = mrq->data;
120  stop = mrq->stop;
121 
122  if (cmd)
123  seq_printf(s,
124  "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
125  cmd->opcode, cmd->arg, cmd->flags,
126  cmd->resp[0], cmd->resp[1], cmd->resp[2],
127  cmd->resp[3], cmd->error);
128  if (data)
129  seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
130  data->bytes_xfered, data->blocks,
131  data->blksz, data->flags, data->error);
132  if (stop)
133  seq_printf(s,
134  "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
135  stop->opcode, stop->arg, stop->flags,
136  stop->resp[0], stop->resp[1], stop->resp[2],
137  stop->resp[3], stop->error);
138  }
139 
140  spin_unlock_bh(&slot->host->lock);
141 
142  return 0;
143 }
144 
145 static int dw_mci_req_open(struct inode *inode, struct file *file)
146 {
147  return single_open(file, dw_mci_req_show, inode->i_private);
148 }
149 
150 static const struct file_operations dw_mci_req_fops = {
151  .owner = THIS_MODULE,
152  .open = dw_mci_req_open,
153  .read = seq_read,
154  .llseek = seq_lseek,
155  .release = single_release,
156 };
157 
158 static int dw_mci_regs_show(struct seq_file *s, void *v)
159 {
	/* s->private is the dw_mci host passed to debugfs_create_file() */
	struct dw_mci *host = s->private;

160  seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
161  seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
162  seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
163  seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
164  seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
165  seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
166 
167  return 0;
168 }
169 
170 static int dw_mci_regs_open(struct inode *inode, struct file *file)
171 {
172  return single_open(file, dw_mci_regs_show, inode->i_private);
173 }
174 
175 static const struct file_operations dw_mci_regs_fops = {
176  .owner = THIS_MODULE,
177  .open = dw_mci_regs_open,
178  .read = seq_read,
179  .llseek = seq_lseek,
180  .release = single_release,
181 };
182 
183 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
184 {
185  struct mmc_host *mmc = slot->mmc;
186  struct dw_mci *host = slot->host;
187  struct dentry *root;
188  struct dentry *node;
189 
190  root = mmc->debugfs_root;
191  if (!root)
192  return;
193 
194  node = debugfs_create_file("regs", S_IRUSR, root, host,
195  &dw_mci_regs_fops);
196  if (!node)
197  goto err;
198 
199  node = debugfs_create_file("req", S_IRUSR, root, slot,
200  &dw_mci_req_fops);
201  if (!node)
202  goto err;
203 
204  node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
205  if (!node)
206  goto err;
207 
208  node = debugfs_create_x32("pending_events", S_IRUSR, root,
209  (u32 *)&host->pending_events);
210  if (!node)
211  goto err;
212 
213  node = debugfs_create_x32("completed_events", S_IRUSR, root,
214  (u32 *)&host->completed_events);
215  if (!node)
216  goto err;
217 
218  return;
219 
220 err:
221  dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
222 }
223 #endif /* defined(CONFIG_DEBUG_FS) */
224 
225 static void dw_mci_set_timeout(struct dw_mci *host)
226 {
227  /* timeout (maximum) */
228  mci_writel(host, TMOUT, 0xffffffff);
229 }
230 
231 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
232 {
233  struct mmc_data *data;
234  struct dw_mci_slot *slot = mmc_priv(mmc);
235  struct dw_mci_drv_data *drv_data = slot->host->drv_data;
236  u32 cmdr;
237  cmd->error = -EINPROGRESS;
238 
239  cmdr = cmd->opcode;
240 
241  if (cmdr == MMC_STOP_TRANSMISSION)
242  cmdr |= SDMMC_CMD_STOP;
243  else
244  cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
245 
246  if (cmd->flags & MMC_RSP_PRESENT) {
247  /* We expect a response, so set this bit */
248  cmdr |= SDMMC_CMD_RESP_EXP;
249  if (cmd->flags & MMC_RSP_136)
250  cmdr |= SDMMC_CMD_RESP_LONG;
251  }
252 
253  if (cmd->flags & MMC_RSP_CRC)
254  cmdr |= SDMMC_CMD_RESP_CRC;
255 
256  data = cmd->data;
257  if (data) {
258  cmdr |= SDMMC_CMD_DAT_EXP;
259  if (data->flags & MMC_DATA_STREAM)
260  cmdr |= SDMMC_CMD_STRM_MODE;
261  if (data->flags & MMC_DATA_WRITE)
262  cmdr |= SDMMC_CMD_DAT_WR;
263  }
264 
265  if (drv_data && drv_data->prepare_command)
266  drv_data->prepare_command(slot->host, &cmdr);
267 
268  return cmdr;
269 }
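
/*
 * Worked example: for a single-block read (CMD17, an R1 response, so
 * MMC_RSP_PRESENT and MMC_RSP_CRC are set and cmd->data is a read),
 * dw_mci_prepare_command() composes
 *
 *	cmdr = 17 | SDMMC_CMD_PRV_DAT_WAIT | SDMMC_CMD_RESP_EXP |
 *	       SDMMC_CMD_RESP_CRC | SDMMC_CMD_DAT_EXP;
 *
 * SDMMC_CMD_START is OR-ed in later by dw_mci_start_command().
 */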
270 
271 static void dw_mci_start_command(struct dw_mci *host,
272  struct mmc_command *cmd, u32 cmd_flags)
273 {
274  host->cmd = cmd;
275  dev_vdbg(host->dev,
276  "start command: ARGR=0x%08x CMDR=0x%08x\n",
277  cmd->arg, cmd_flags);
278 
279  mci_writel(host, CMDARG, cmd->arg);
280  wmb();
281 
282  mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
283 }
284 
285 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
286 {
287  dw_mci_start_command(host, data->stop, host->stop_cmdr);
288 }
289 
290 /* DMA interface functions */
291 static void dw_mci_stop_dma(struct dw_mci *host)
292 {
293  if (host->using_dma) {
294  host->dma_ops->stop(host);
295  host->dma_ops->cleanup(host);
296  } else {
297  /* Data transfer was stopped by the interrupt handler */
298  set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
299  }
300 }
301 
302 static int dw_mci_get_dma_dir(struct mmc_data *data)
303 {
304  if (data->flags & MMC_DATA_WRITE)
305  return DMA_TO_DEVICE;
306  else
307  return DMA_FROM_DEVICE;
308 }
309 
310 #ifdef CONFIG_MMC_DW_IDMAC
311 static void dw_mci_dma_cleanup(struct dw_mci *host)
312 {
313  struct mmc_data *data = host->data;
314 
315  if (data)
316  if (!data->host_cookie)
317  dma_unmap_sg(host->dev,
318  data->sg,
319  data->sg_len,
320  dw_mci_get_dma_dir(data));
321 }
322 
323 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
324 {
325  u32 temp;
326 
327  /* Disable and reset the IDMAC interface */
328  temp = mci_readl(host, CTRL);
329  temp &= ~SDMMC_CTRL_USE_IDMAC;
330  temp |= SDMMC_CTRL_DMA_RESET;
331  mci_writel(host, CTRL, temp);
332 
333  /* Stop the IDMAC running */
334  temp = mci_readl(host, BMOD);
335  temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
336  mci_writel(host, BMOD, temp);
337 }
338 
339 static void dw_mci_idmac_complete_dma(struct dw_mci *host)
340 {
341  struct mmc_data *data = host->data;
342 
343  dev_vdbg(host->dev, "DMA complete\n");
344 
345  host->dma_ops->cleanup(host);
346 
347  /*
348  * If the card was removed, data will be NULL. No point in trying to
349  * send the stop command or waiting for NBUSY in this case.
350  */
351  if (data) {
352  set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
353  tasklet_schedule(&host->tasklet);
354  }
355 }
356 
357 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
358  unsigned int sg_len)
359 {
360  int i;
361  struct idmac_desc *desc = host->sg_cpu;
362 
363  for (i = 0; i < sg_len; i++, desc++) {
364  unsigned int length = sg_dma_len(&data->sg[i]);
365  u32 mem_addr = sg_dma_address(&data->sg[i]);
366 
367  /* Set the OWN bit and disable interrupts for this descriptor */
368  desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
369 
370  /* Buffer length */
371  IDMAC_SET_BUFFER1_SIZE(desc, length);
372 
373  /* Physical address to DMA to/from */
374  desc->des2 = mem_addr;
375  }
376 
377  /* Set first descriptor */
378  desc = host->sg_cpu;
379  desc->des0 |= IDMAC_DES0_FD;
380 
381  /* Set last descriptor */
382  desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
383  desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
384  desc->des0 |= IDMAC_DES0_LD;
385 
386  wmb();
387 }
388 
389 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
390 {
391  u32 temp;
392 
393  dw_mci_translate_sglist(host, host->data, sg_len);
394 
395  /* Select IDMAC interface */
396  temp = mci_readl(host, CTRL);
397  temp |= SDMMC_CTRL_USE_IDMAC;
398  mci_writel(host, CTRL, temp);
399 
400  wmb();
401 
402  /* Enable the IDMAC */
403  temp = mci_readl(host, BMOD);
404  temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
405  mci_writel(host, BMOD, temp);
406 
407  /* Start it running */
408  mci_writel(host, PLDMND, 1);
409 }
410 
411 static int dw_mci_idmac_init(struct dw_mci *host)
412 {
413  struct idmac_desc *p;
414  int i;
415 
416  /* Number of descriptors in the ring buffer */
417  host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
418 
419  /* Forward link the descriptor list */
420  for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
421  p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
422 
423  /* Set the last descriptor as the end-of-ring descriptor */
424  p->des3 = host->sg_dma;
425  p->des0 = IDMAC_DES0_ER;
426 
427  mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);
428 
429  /* Mask out interrupts - get Tx & Rx complete only */
430  mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
431  SDMMC_IDMAC_INT_TI);
432 
433  /* Set the descriptor base address */
434  mci_writel(host, DBADDR, host->sg_dma);
435  return 0;
436 }
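
/*
 * With 4 KiB pages and 16-byte descriptors the ring built above holds
 * PAGE_SIZE / sizeof(struct idmac_desc) = 4096 / 16 = 256 descriptors:
 * entry i's des3 points at the bus address of entry i + 1, and the last
 * entry points back at host->sg_dma with IDMAC_DES0_ER marking the end
 * of the ring.
 */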
437 
438 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
439  .init = dw_mci_idmac_init,
440  .start = dw_mci_idmac_start_dma,
441  .stop = dw_mci_idmac_stop_dma,
442  .complete = dw_mci_idmac_complete_dma,
443  .cleanup = dw_mci_dma_cleanup,
444 };
445 #endif /* CONFIG_MMC_DW_IDMAC */
446 
447 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
448  struct mmc_data *data,
449  bool next)
450 {
451  struct scatterlist *sg;
452  unsigned int i, sg_len;
453 
454  if (!next && data->host_cookie)
455  return data->host_cookie;
456 
457  /*
458  * We don't do DMA on "complex" transfers, i.e. with
459  * non-word-aligned buffers or lengths. Also, we don't bother
460  * with all the DMA setup overhead for short transfers.
461  */
462  if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
463  return -EINVAL;
464 
465  if (data->blksz & 3)
466  return -EINVAL;
467 
468  for_each_sg(data->sg, sg, data->sg_len, i) {
469  if (sg->offset & 3 || sg->length & 3)
470  return -EINVAL;
471  }
472 
473  sg_len = dma_map_sg(host->dev,
474  data->sg,
475  data->sg_len,
476  dw_mci_get_dma_dir(data));
477  if (sg_len == 0)
478  return -EINVAL;
479 
480  if (next)
481  data->host_cookie = sg_len;
482 
483  return sg_len;
484 }
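
/*
 * The checks above mean DMA is only attempted when the transfer is at
 * least DW_MCI_DMA_THRESHOLD (16) bytes and every scatterlist entry is
 * word-aligned in both offset and length; e.g. an sg entry with
 * sg->offset == 2 or sg->length == 6 forces the PIO fallback in
 * dw_mci_submit_data().
 */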
485 
486 static void dw_mci_pre_req(struct mmc_host *mmc,
487  struct mmc_request *mrq,
488  bool is_first_req)
489 {
490  struct dw_mci_slot *slot = mmc_priv(mmc);
491  struct mmc_data *data = mrq->data;
492 
493  if (!slot->host->use_dma || !data)
494  return;
495 
496  if (data->host_cookie) {
497  data->host_cookie = 0;
498  return;
499  }
500 
501  if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
502  data->host_cookie = 0;
503 }
504 
505 static void dw_mci_post_req(struct mmc_host *mmc,
506  struct mmc_request *mrq,
507  int err)
508 {
509  struct dw_mci_slot *slot = mmc_priv(mmc);
510  struct mmc_data *data = mrq->data;
511 
512  if (!slot->host->use_dma || !data)
513  return;
514 
515  if (data->host_cookie)
516  dma_unmap_sg(slot->host->dev,
517  data->sg,
518  data->sg_len,
519  dw_mci_get_dma_dir(data));
520  data->host_cookie = 0;
521 }
522 
523 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
524 {
525  int sg_len;
526  u32 temp;
527 
528  host->using_dma = 0;
529 
530  /* If we don't have a channel, we can't do DMA */
531  if (!host->use_dma)
532  return -ENODEV;
533 
534  sg_len = dw_mci_pre_dma_transfer(host, data, 0);
535  if (sg_len < 0) {
536  host->dma_ops->stop(host);
537  return sg_len;
538  }
539 
540  host->using_dma = 1;
541 
542  dev_vdbg(host->dev,
543  "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
544  (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
545  sg_len);
546 
547  /* Enable the DMA interface */
548  temp = mci_readl(host, CTRL);
549  temp |= SDMMC_CTRL_DMA_ENABLE;
550  mci_writel(host, CTRL, temp);
551 
552  /* Disable RX/TX IRQs, let DMA handle it */
553  temp = mci_readl(host, INTMASK);
554  temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
555  mci_writel(host, INTMASK, temp);
556 
557  host->dma_ops->start(host, sg_len);
558 
559  return 0;
560 }
561 
562 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
563 {
564  u32 temp;
565 
566  data->error = -EINPROGRESS;
567 
568  WARN_ON(host->data);
569  host->sg = NULL;
570  host->data = data;
571 
572  if (data->flags & MMC_DATA_READ)
573  host->dir_status = DW_MCI_RECV_STATUS;
574  else
575  host->dir_status = DW_MCI_SEND_STATUS;
576 
577  if (dw_mci_submit_data_dma(host, data)) {
578  int flags = SG_MITER_ATOMIC;
579  if (host->data->flags & MMC_DATA_READ)
580  flags |= SG_MITER_TO_SG;
581  else
582  flags |= SG_MITER_FROM_SG;
583 
584  sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
585  host->sg = data->sg;
586  host->part_buf_start = 0;
587  host->part_buf_count = 0;
588 
589  mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
590  temp = mci_readl(host, INTMASK);
591  temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
592  mci_writel(host, INTMASK, temp);
593 
594  temp = mci_readl(host, CTRL);
595  temp &= ~SDMMC_CTRL_DMA_ENABLE;
596  mci_writel(host, CTRL, temp);
597  }
598 }
599 
600 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
601 {
602  struct dw_mci *host = slot->host;
603  unsigned long timeout = jiffies + msecs_to_jiffies(500);
604  unsigned int cmd_status = 0;
605 
606  mci_writel(host, CMDARG, arg);
607  wmb();
608  mci_writel(host, CMD, SDMMC_CMD_START | cmd);
609 
610  while (time_before(jiffies, timeout)) {
611  cmd_status = mci_readl(host, CMD);
612  if (!(cmd_status & SDMMC_CMD_START))
613  return;
614  }
615  dev_err(&slot->mmc->class_dev,
616  "Timeout sending command (cmd %#x arg %#x status %#x)\n",
617  cmd, arg, cmd_status);
618 }
619 
620 static void dw_mci_setup_bus(struct dw_mci_slot *slot)
621 {
622  struct dw_mci *host = slot->host;
623  u32 div;
624  u32 clk_en_a;
625 
626  if (slot->clock != host->current_speed) {
627  div = host->bus_hz / slot->clock;
628  if (host->bus_hz % slot->clock && host->bus_hz > slot->clock)
629  /*
630  * move the + 1 after the divide to prevent
631  * over-clocking the card.
632  */
633  div += 1;
634 
635  div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0;
636 
637  dev_info(&slot->mmc->class_dev,
638  "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
639  " div = %d)\n", slot->id, host->bus_hz, slot->clock,
640  div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
641 
642  /* disable clock */
643  mci_writel(host, CLKENA, 0);
644  mci_writel(host, CLKSRC, 0);
645 
646  /* inform CIU */
647  mci_send_cmd(slot,
648  SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
649 
650  /* set clock to desired speed */
651  mci_writel(host, CLKDIV, div);
652 
653  /* inform CIU */
654  mci_send_cmd(slot,
655  SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
656 
657  /* enable clock; only low power if no SDIO */
658  clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
659  if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
660  clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
661  mci_writel(host, CLKENA, clk_en_a);
662 
663  /* inform CIU */
664  mci_send_cmd(slot,
665  SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
666 
667  host->current_speed = slot->clock;
668  }
669 
670  /* Set the current slot bus width */
671  mci_writel(host, CTYPE, (slot->ctype << slot->id));
672 }
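
/*
 * Divider arithmetic, as a worked example: CLKDIV divides bus_hz by
 * 2 * CLKDIV (CLKDIV == 0 bypasses the divider). With bus_hz = 100 MHz
 * and a requested 400 kHz, div = 100000000 / 400000 = 250, then
 * DIV_ROUND_UP(250, 2) = 125 is written to CLKDIV, giving
 * 100 MHz / (2 * 125) = 400 kHz. CLKDIV is 8 bits wide, so the largest
 * divisor is 2 * 255 = 510, which is why dw_mci_init_slot() sets
 * mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510).
 */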
673 
674 static void __dw_mci_start_request(struct dw_mci *host,
675  struct dw_mci_slot *slot,
676  struct mmc_command *cmd)
677 {
678  struct mmc_request *mrq;
679  struct mmc_data *data;
680  u32 cmdflags;
681 
682  mrq = slot->mrq;
683  if (host->pdata->select_slot)
684  host->pdata->select_slot(slot->id);
685 
686  /* Slot specific timing and width adjustment */
687  dw_mci_setup_bus(slot);
688 
689  host->cur_slot = slot;
690  host->mrq = mrq;
691 
692  host->pending_events = 0;
693  host->completed_events = 0;
694  host->data_status = 0;
695 
696  data = cmd->data;
697  if (data) {
698  dw_mci_set_timeout(host);
699  mci_writel(host, BYTCNT, data->blksz*data->blocks);
700  mci_writel(host, BLKSIZ, data->blksz);
701  }
702 
703  cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
704 
705  /* this is the first command, send the initialization clock */
706  if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
707  cmdflags |= SDMMC_CMD_INIT;
708 
709  if (data) {
710  dw_mci_submit_data(host, data);
711  wmb();
712  }
713 
714  dw_mci_start_command(host, cmd, cmdflags);
715 
716  if (mrq->stop)
717  host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
718 }
719 
720 static void dw_mci_start_request(struct dw_mci *host,
721  struct dw_mci_slot *slot)
722 {
723  struct mmc_request *mrq = slot->mrq;
724  struct mmc_command *cmd;
725 
726  cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
727  __dw_mci_start_request(host, slot, cmd);
728 }
729 
730 /* must be called with host->lock held */
731 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
732  struct mmc_request *mrq)
733 {
734  dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
735  host->state);
736 
737  slot->mrq = mrq;
738 
739  if (host->state == STATE_IDLE) {
740  host->state = STATE_SENDING_CMD;
741  dw_mci_start_request(host, slot);
742  } else {
743  list_add_tail(&slot->queue_node, &host->queue);
744  }
745 }
746 
747 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
748 {
749  struct dw_mci_slot *slot = mmc_priv(mmc);
750  struct dw_mci *host = slot->host;
751 
752  WARN_ON(slot->mrq);
753 
754  /*
755  * The check for card presence and queueing of the request must be
756  * atomic, otherwise the card could be removed in between and the
757  * request wouldn't fail until another card was inserted.
758  */
759  spin_lock_bh(&host->lock);
760 
761  if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
762  spin_unlock_bh(&host->lock);
763  mrq->cmd->error = -ENOMEDIUM;
764  mmc_request_done(mmc, mrq);
765  return;
766  }
767 
768  dw_mci_queue_request(host, slot, mrq);
769 
770  spin_unlock_bh(&host->lock);
771 }
772 
773 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
774 {
775  struct dw_mci_slot *slot = mmc_priv(mmc);
776  struct dw_mci_drv_data *drv_data = slot->host->drv_data;
777  u32 regs;
778 
779  /* set default 1 bit mode */
780  slot->ctype = SDMMC_CTYPE_1BIT;
781 
782  switch (ios->bus_width) {
783  case MMC_BUS_WIDTH_1:
784  slot->ctype = SDMMC_CTYPE_1BIT;
785  break;
786  case MMC_BUS_WIDTH_4:
787  slot->ctype = SDMMC_CTYPE_4BIT;
788  break;
789  case MMC_BUS_WIDTH_8:
790  slot->ctype = SDMMC_CTYPE_8BIT;
791  break;
792  }
793 
794  regs = mci_readl(slot->host, UHS_REG);
795 
796  /* DDR mode set */
797  if (ios->timing == MMC_TIMING_UHS_DDR50)
798  regs |= (0x1 << slot->id) << 16;
799  else
800  regs &= ~(0x1 << slot->id) << 16;
801 
802  mci_writel(slot->host, UHS_REG, regs);
803 
804  if (ios->clock) {
805  /*
806  * Use mirror of ios->clock to prevent race with mmc
807  * core ios update when finding the minimum.
808  */
809  slot->clock = ios->clock;
810  }
811 
812  if (drv_data && drv_data->set_ios)
813  drv_data->set_ios(slot->host, ios);
814 
815  switch (ios->power_mode) {
816  case MMC_POWER_UP:
817  set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
818  break;
819  default:
820  break;
821  }
822 }
823 
824 static int dw_mci_get_ro(struct mmc_host *mmc)
825 {
826  int read_only;
827  struct dw_mci_slot *slot = mmc_priv(mmc);
828  struct dw_mci_board *brd = slot->host->pdata;
829 
830  /* Use platform get_ro function, else try on board write protect */
831  if (brd->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT)
832  read_only = 0;
833  else if (brd->get_ro)
834  read_only = brd->get_ro(slot->id);
835  else
836  read_only =
837  mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
838 
839  dev_dbg(&mmc->class_dev, "card is %s\n",
840  read_only ? "read-only" : "read-write");
841 
842  return read_only;
843 }
844 
845 static int dw_mci_get_cd(struct mmc_host *mmc)
846 {
847  int present;
848  struct dw_mci_slot *slot = mmc_priv(mmc);
849  struct dw_mci_board *brd = slot->host->pdata;
850 
851  /* Use platform get_cd function, else try onboard card detect */
852  if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
853  present = 1;
854  else if (brd->get_cd)
855  present = !brd->get_cd(slot->id);
856  else
857  present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
858  == 0 ? 1 : 0;
859 
860  if (present)
861  dev_dbg(&mmc->class_dev, "card is present\n");
862  else
863  dev_dbg(&mmc->class_dev, "card is not present\n");
864 
865  return present;
866 }
867 
868 /*
869  * Disable low power mode.
870  *
871  * Low power mode will stop the card clock when idle. According to the
872  * description of the CLKENA register we should disable low power mode
873  * for SDIO cards if we need SDIO interrupts to work.
874  *
875  * This function is fast if low power mode is already disabled.
876  */
877 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
878 {
879  struct dw_mci *host = slot->host;
880  u32 clk_en_a;
881  const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
882 
883  clk_en_a = mci_readl(host, CLKENA);
884 
885  if (clk_en_a & clken_low_pwr) {
886  mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
887  mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
888  SDMMC_CMD_PRV_DAT_WAIT, 0);
889  }
890 }
891 
892 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
893 {
894  struct dw_mci_slot *slot = mmc_priv(mmc);
895  struct dw_mci *host = slot->host;
896  u32 int_mask;
897 
898  /* Enable/disable Slot Specific SDIO interrupt */
899  int_mask = mci_readl(host, INTMASK);
900  if (enb) {
901  /*
902  * Turn off low power mode if it was enabled. This is a bit of
903  * a heavy operation and we disable / enable IRQs a lot, so
904  * we'll leave low power mode disabled and it will get
905  * re-enabled again in dw_mci_setup_bus().
906  */
907  dw_mci_disable_low_power(slot);
908 
909  mci_writel(host, INTMASK,
910  (int_mask | SDMMC_INT_SDIO(slot->id)));
911  } else {
912  mci_writel(host, INTMASK,
913  (int_mask & ~SDMMC_INT_SDIO(slot->id)));
914  }
915 }
916 
917 static const struct mmc_host_ops dw_mci_ops = {
918  .request = dw_mci_request,
919  .pre_req = dw_mci_pre_req,
920  .post_req = dw_mci_post_req,
921  .set_ios = dw_mci_set_ios,
922  .get_ro = dw_mci_get_ro,
923  .get_cd = dw_mci_get_cd,
924  .enable_sdio_irq = dw_mci_enable_sdio_irq,
925 };
926 
927 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
928  __releases(&host->lock)
929  __acquires(&host->lock)
930 {
931  struct dw_mci_slot *slot;
932  struct mmc_host *prev_mmc = host->cur_slot->mmc;
933 
934  WARN_ON(host->cmd || host->data);
935 
936  host->cur_slot->mrq = NULL;
937  host->mrq = NULL;
938  if (!list_empty(&host->queue)) {
939  slot = list_entry(host->queue.next,
940  struct dw_mci_slot, queue_node);
941  list_del(&slot->queue_node);
942  dev_vdbg(host->dev, "list not empty: %s is next\n",
943  mmc_hostname(slot->mmc));
944  host->state = STATE_SENDING_CMD;
945  dw_mci_start_request(host, slot);
946  } else {
947  dev_vdbg(host->dev, "list empty\n");
948  host->state = STATE_IDLE;
949  }
950 
951  spin_unlock(&host->lock);
952  mmc_request_done(prev_mmc, mrq);
953  spin_lock(&host->lock);
954 }
955 
956 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
957 {
958  u32 status = host->cmd_status;
959 
960  host->cmd_status = 0;
961 
962  /* Read the response from the card (up to 16 bytes) */
963  if (cmd->flags & MMC_RSP_PRESENT) {
964  if (cmd->flags & MMC_RSP_136) {
965  cmd->resp[3] = mci_readl(host, RESP0);
966  cmd->resp[2] = mci_readl(host, RESP1);
967  cmd->resp[1] = mci_readl(host, RESP2);
968  cmd->resp[0] = mci_readl(host, RESP3);
969  } else {
970  cmd->resp[0] = mci_readl(host, RESP0);
971  cmd->resp[1] = 0;
972  cmd->resp[2] = 0;
973  cmd->resp[3] = 0;
974  }
975  }
976 
977  if (status & SDMMC_INT_RTO)
978  cmd->error = -ETIMEDOUT;
979  else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
980  cmd->error = -EILSEQ;
981  else if (status & SDMMC_INT_RESP_ERR)
982  cmd->error = -EIO;
983  else
984  cmd->error = 0;
985 
986  if (cmd->error) {
987  /* newer ip versions need a delay between retries */
988  if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
989  mdelay(20);
990 
991  if (cmd->data) {
992  dw_mci_stop_dma(host);
993  host->data = NULL;
994  }
995  }
996 }
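
/*
 * Note the reversed indexing for 136-bit (R2) responses above: the
 * controller places the most significant word of the response in RESP3
 * and the least significant in RESP0, while the MMC core expects
 * cmd->resp[0] to hold the most significant word, so RESP3 lands in
 * resp[0] and RESP0 in resp[3].
 */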
997 
998 static void dw_mci_tasklet_func(unsigned long priv)
999 {
1000  struct dw_mci *host = (struct dw_mci *)priv;
1001  struct mmc_data *data;
1002  struct mmc_command *cmd;
1003  enum dw_mci_state state;
1004  enum dw_mci_state prev_state;
1005  u32 status, ctrl;
1006 
1007  spin_lock(&host->lock);
1008 
1009  state = host->state;
1010  data = host->data;
1011 
1012  do {
1013  prev_state = state;
1014 
1015  switch (state) {
1016  case STATE_IDLE:
1017  break;
1018 
1019  case STATE_SENDING_CMD:
1020  if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1021  &host->pending_events))
1022  break;
1023 
1024  cmd = host->cmd;
1025  host->cmd = NULL;
1026  set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1027  dw_mci_command_complete(host, cmd);
1028  if (cmd == host->mrq->sbc && !cmd->error) {
1029  prev_state = state = STATE_SENDING_CMD;
1030  __dw_mci_start_request(host, host->cur_slot,
1031  host->mrq->cmd);
1032  goto unlock;
1033  }
1034 
1035  if (!host->mrq->data || cmd->error) {
1036  dw_mci_request_end(host, host->mrq);
1037  goto unlock;
1038  }
1039 
1040  prev_state = state = STATE_SENDING_DATA;
1041  /* fall through */
1042 
1043  case STATE_SENDING_DATA:
1044  if (test_and_clear_bit(EVENT_DATA_ERROR,
1045  &host->pending_events)) {
1046  dw_mci_stop_dma(host);
1047  if (data->stop)
1048  send_stop_cmd(host, data);
1049  state = STATE_DATA_ERROR;
1050  break;
1051  }
1052 
1053  if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1054  &host->pending_events))
1055  break;
1056 
1057  set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1058  prev_state = state = STATE_DATA_BUSY;
1059  /* fall through */
1060 
1061  case STATE_DATA_BUSY:
1062  if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1063  &host->pending_events))
1064  break;
1065 
1066  host->data = NULL;
1067  set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1068  status = host->data_status;
1069 
1070  if (status & DW_MCI_DATA_ERROR_FLAGS) {
1071  if (status & SDMMC_INT_DTO) {
1072  data->error = -ETIMEDOUT;
1073  } else if (status & SDMMC_INT_DCRC) {
1074  data->error = -EILSEQ;
1075  } else if (status & SDMMC_INT_EBE &&
1076  host->dir_status ==
1077  DW_MCI_SEND_STATUS) {
1078  /*
1079  * No data CRC status was returned.
1080  * The number of bytes transferred will
1081  * be exaggerated in PIO mode.
1082  */
1083  data->bytes_xfered = 0;
1084  data->error = -ETIMEDOUT;
1085  } else {
1086  dev_err(host->dev,
1087  "data FIFO error "
1088  "(status=%08x)\n",
1089  status);
1090  data->error = -EIO;
1091  }
1092  /*
1093  * After an error, there may be data lingering
1094  * in the FIFO, so reset it - doing so
1095  * generates a block interrupt, hence setting
1096  * the scatter-gather pointer to NULL.
1097  */
1098  sg_miter_stop(&host->sg_miter);
1099  host->sg = NULL;
1100  ctrl = mci_readl(host, CTRL);
1101  ctrl |= SDMMC_CTRL_FIFO_RESET;
1102  mci_writel(host, CTRL, ctrl);
1103  } else {
1104  data->bytes_xfered = data->blocks * data->blksz;
1105  data->error = 0;
1106  }
1107 
1108  if (!data->stop) {
1109  dw_mci_request_end(host, host->mrq);
1110  goto unlock;
1111  }
1112 
1113  if (host->mrq->sbc && !data->error) {
1114  data->stop->error = 0;
1115  dw_mci_request_end(host, host->mrq);
1116  goto unlock;
1117  }
1118 
1119  prev_state = state = STATE_SENDING_STOP;
1120  if (!data->error)
1121  send_stop_cmd(host, data);
1122  /* fall through */
1123 
1124  case STATE_SENDING_STOP:
1125  if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1126  &host->pending_events))
1127  break;
1128 
1129  host->cmd = NULL;
1130  dw_mci_command_complete(host, host->mrq->stop);
1131  dw_mci_request_end(host, host->mrq);
1132  goto unlock;
1133 
1134  case STATE_DATA_ERROR:
1135  if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1136  &host->pending_events))
1137  break;
1138 
1139  state = STATE_DATA_BUSY;
1140  break;
1141  }
1142  } while (state != prev_state);
1143 
1144  host->state = state;
1145 unlock:
1146  spin_unlock(&host->lock);
1147 
1148 }
1149 
1150 /* push final bytes to part_buf, only use during push */
1151 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1152 {
1153  memcpy((void *)&host->part_buf, buf, cnt);
1154  host->part_buf_count = cnt;
1155 }
1156 
1157 /* append bytes to part_buf, only use during push */
1158 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1159 {
1160  cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1161  memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1162  host->part_buf_count += cnt;
1163  return cnt;
1164 }
1165 
1166 /* pull first bytes from part_buf, only use during pull */
1167 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1168 {
1169  cnt = min(cnt, (int)host->part_buf_count);
1170  if (cnt) {
1171  memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1172  cnt);
1173  host->part_buf_count -= cnt;
1174  host->part_buf_start += cnt;
1175  }
1176  return cnt;
1177 }
1178 
1179 /* pull final bytes from the part_buf, assuming it's just been filled */
1180 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1181 {
1182  memcpy(buf, &host->part_buf, cnt);
1183  host->part_buf_start = cnt;
1184  host->part_buf_count = (1 << host->data_shift) - cnt;
1185 }
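
/*
 * The part_buf helpers above exist because the FIFO is only accessible
 * in full host-data-width units (1 << host->data_shift bytes). A worked
 * example with a 16-bit FIFO: pushing a 5-byte buffer writes two 16-bit
 * words (4 bytes) to the FIFO and parks the odd 5th byte in part_buf;
 * it is flushed either when the next scatterlist entry tops it up to a
 * full word or, at the end of the transfer, as a final partial word.
 */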
1186 
1187 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1188 {
1189  /* try and push anything in the part_buf */
1190  if (unlikely(host->part_buf_count)) {
1191  int len = dw_mci_push_part_bytes(host, buf, cnt);
1192  buf += len;
1193  cnt -= len;
1194  if (!sg_next(host->sg) || host->part_buf_count == 2) {
1195  mci_writew(host, DATA(host->data_offset),
1196  host->part_buf16);
1197  host->part_buf_count = 0;
1198  }
1199  }
1200 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1201  if (unlikely((unsigned long)buf & 0x1)) {
1202  while (cnt >= 2) {
1203  u16 aligned_buf[64];
1204  int len = min(cnt & -2, (int)sizeof(aligned_buf));
1205  int items = len >> 1;
1206  int i;
1207  /* memcpy from input buffer into aligned buffer */
1208  memcpy(aligned_buf, buf, len);
1209  buf += len;
1210  cnt -= len;
1211  /* push data from aligned buffer into fifo */
1212  for (i = 0; i < items; ++i)
1213  mci_writew(host, DATA(host->data_offset),
1214  aligned_buf[i]);
1215  }
1216  } else
1217 #endif
1218  {
1219  u16 *pdata = buf;
1220  for (; cnt >= 2; cnt -= 2)
1221  mci_writew(host, DATA(host->data_offset), *pdata++);
1222  buf = pdata;
1223  }
1224  /* put anything remaining in the part_buf */
1225  if (cnt) {
1226  dw_mci_set_part_bytes(host, buf, cnt);
1227  if (!sg_next(host->sg))
1228  mci_writew(host, DATA(host->data_offset),
1229  host->part_buf16);
1230  }
1231 }
1232 
1233 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1234 {
1235 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1236  if (unlikely((unsigned long)buf & 0x1)) {
1237  while (cnt >= 2) {
1238  /* pull data from fifo into aligned buffer */
1239  u16 aligned_buf[64];
1240  int len = min(cnt & -2, (int)sizeof(aligned_buf));
1241  int items = len >> 1;
1242  int i;
1243  for (i = 0; i < items; ++i)
1244  aligned_buf[i] = mci_readw(host,
1245  DATA(host->data_offset));
1246  /* memcpy from aligned buffer into output buffer */
1247  memcpy(buf, aligned_buf, len);
1248  buf += len;
1249  cnt -= len;
1250  }
1251  } else
1252 #endif
1253  {
1254  u16 *pdata = buf;
1255  for (; cnt >= 2; cnt -= 2)
1256  *pdata++ = mci_readw(host, DATA(host->data_offset));
1257  buf = pdata;
1258  }
1259  if (cnt) {
1260  host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1261  dw_mci_pull_final_bytes(host, buf, cnt);
1262  }
1263 }
1264 
1265 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1266 {
1267  /* try and push anything in the part_buf */
1268  if (unlikely(host->part_buf_count)) {
1269  int len = dw_mci_push_part_bytes(host, buf, cnt);
1270  buf += len;
1271  cnt -= len;
1272  if (!sg_next(host->sg) || host->part_buf_count == 4) {
1273  mci_writel(host, DATA(host->data_offset),
1274  host->part_buf32);
1275  host->part_buf_count = 0;
1276  }
1277  }
1278 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1279  if (unlikely((unsigned long)buf & 0x3)) {
1280  while (cnt >= 4) {
1281  u32 aligned_buf[32];
1282  int len = min(cnt & -4, (int)sizeof(aligned_buf));
1283  int items = len >> 2;
1284  int i;
1285  /* memcpy from input buffer into aligned buffer */
1286  memcpy(aligned_buf, buf, len);
1287  buf += len;
1288  cnt -= len;
1289  /* push data from aligned buffer into fifo */
1290  for (i = 0; i < items; ++i)
1291  mci_writel(host, DATA(host->data_offset),
1292  aligned_buf[i]);
1293  }
1294  } else
1295 #endif
1296  {
1297  u32 *pdata = buf;
1298  for (; cnt >= 4; cnt -= 4)
1299  mci_writel(host, DATA(host->data_offset), *pdata++);
1300  buf = pdata;
1301  }
1302  /* put anything remaining in the part_buf */
1303  if (cnt) {
1304  dw_mci_set_part_bytes(host, buf, cnt);
1305  if (!sg_next(host->sg))
1306  mci_writel(host, DATA(host->data_offset),
1307  host->part_buf32);
1308  }
1309 }
1310 
1311 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1312 {
1313 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1314  if (unlikely((unsigned long)buf & 0x3)) {
1315  while (cnt >= 4) {
1316  /* pull data from fifo into aligned buffer */
1317  u32 aligned_buf[32];
1318  int len = min(cnt & -4, (int)sizeof(aligned_buf));
1319  int items = len >> 2;
1320  int i;
1321  for (i = 0; i < items; ++i)
1322  aligned_buf[i] = mci_readl(host,
1323  DATA(host->data_offset));
1324  /* memcpy from aligned buffer into output buffer */
1325  memcpy(buf, aligned_buf, len);
1326  buf += len;
1327  cnt -= len;
1328  }
1329  } else
1330 #endif
1331  {
1332  u32 *pdata = buf;
1333  for (; cnt >= 4; cnt -= 4)
1334  *pdata++ = mci_readl(host, DATA(host->data_offset));
1335  buf = pdata;
1336  }
1337  if (cnt) {
1338  host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1339  dw_mci_pull_final_bytes(host, buf, cnt);
1340  }
1341 }
1342 
1343 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1344 {
1345  /* try and push anything in the part_buf */
1346  if (unlikely(host->part_buf_count)) {
1347  int len = dw_mci_push_part_bytes(host, buf, cnt);
1348  buf += len;
1349  cnt -= len;
1350  if (!sg_next(host->sg) || host->part_buf_count == 8) {
1351  mci_writeq(host, DATA(host->data_offset),
1352  host->part_buf);
1353  host->part_buf_count = 0;
1354  }
1355  }
1356 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1357  if (unlikely((unsigned long)buf & 0x7)) {
1358  while (cnt >= 8) {
1359  u64 aligned_buf[16];
1360  int len = min(cnt & -8, (int)sizeof(aligned_buf));
1361  int items = len >> 3;
1362  int i;
1363  /* memcpy from input buffer into aligned buffer */
1364  memcpy(aligned_buf, buf, len);
1365  buf += len;
1366  cnt -= len;
1367  /* push data from aligned buffer into fifo */
1368  for (i = 0; i < items; ++i)
1369  mci_writeq(host, DATA(host->data_offset),
1370  aligned_buf[i]);
1371  }
1372  } else
1373 #endif
1374  {
1375  u64 *pdata = buf;
1376  for (; cnt >= 8; cnt -= 8)
1377  mci_writeq(host, DATA(host->data_offset), *pdata++);
1378  buf = pdata;
1379  }
1380  /* put anything remaining in the part_buf */
1381  if (cnt) {
1382  dw_mci_set_part_bytes(host, buf, cnt);
1383  if (!sg_next(host->sg))
1384  mci_writeq(host, DATA(host->data_offset),
1385  host->part_buf);
1386  }
1387 }
1388 
1389 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1390 {
1391 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1392  if (unlikely((unsigned long)buf & 0x7)) {
1393  while (cnt >= 8) {
1394  /* pull data from fifo into aligned buffer */
1395  u64 aligned_buf[16];
1396  int len = min(cnt & -8, (int)sizeof(aligned_buf));
1397  int items = len >> 3;
1398  int i;
1399  for (i = 0; i < items; ++i)
1400  aligned_buf[i] = mci_readq(host,
1401  DATA(host->data_offset));
1402  /* memcpy from aligned buffer into output buffer */
1403  memcpy(buf, aligned_buf, len);
1404  buf += len;
1405  cnt -= len;
1406  }
1407  } else
1408 #endif
1409  {
1410  u64 *pdata = buf;
1411  for (; cnt >= 8; cnt -= 8)
1412  *pdata++ = mci_readq(host, DATA(host->data_offset));
1413  buf = pdata;
1414  }
1415  if (cnt) {
1416  host->part_buf = mci_readq(host, DATA(host->data_offset));
1417  dw_mci_pull_final_bytes(host, buf, cnt);
1418  }
1419 }
1420 
1421 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1422 {
1423  int len;
1424 
1425  /* get remaining partial bytes */
1426  len = dw_mci_pull_part_bytes(host, buf, cnt);
1427  if (unlikely(len == cnt))
1428  return;
1429  buf += len;
1430  cnt -= len;
1431 
1432  /* get the rest of the data */
1433  host->pull_data(host, buf, cnt);
1434 }
1435 
1436 static void dw_mci_read_data_pio(struct dw_mci *host)
1437 {
1438  struct sg_mapping_iter *sg_miter = &host->sg_miter;
1439  void *buf;
1440  unsigned int offset;
1441  struct mmc_data *data = host->data;
1442  int shift = host->data_shift;
1443  u32 status;
1444  unsigned int nbytes = 0, len;
1445  unsigned int remain, fcnt;
1446 
1447  do {
1448  if (!sg_miter_next(sg_miter))
1449  goto done;
1450 
1451  host->sg = sg_miter->__sg;
1452  buf = sg_miter->addr;
1453  remain = sg_miter->length;
1454  offset = 0;
1455 
1456  do {
1457  fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1458  << shift) + host->part_buf_count;
1459  len = min(remain, fcnt);
1460  if (!len)
1461  break;
1462  dw_mci_pull_data(host, (void *)(buf + offset), len);
1463  offset += len;
1464  nbytes += len;
1465  remain -= len;
1466  } while (remain);
1467 
1468  sg_miter->consumed = offset;
1469  status = mci_readl(host, MINTSTS);
1470  mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1471  } while (status & SDMMC_INT_RXDR); /* if the RXDR is ready, read again */
1472  data->bytes_xfered += nbytes;
1473 
1474  if (!remain) {
1475  if (!sg_miter_next(sg_miter))
1476  goto done;
1477  sg_miter->consumed = 0;
1478  }
1479  sg_miter_stop(sg_miter);
1480  return;
1481 
1482 done:
1483  data->bytes_xfered += nbytes;
1484  sg_miter_stop(sg_miter);
1485  host->sg = NULL;
1486  smp_wmb();
1487  set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1488 }
1489 
1490 static void dw_mci_write_data_pio(struct dw_mci *host)
1491 {
1492  struct sg_mapping_iter *sg_miter = &host->sg_miter;
1493  void *buf;
1494  unsigned int offset;
1495  struct mmc_data *data = host->data;
1496  int shift = host->data_shift;
1497  u32 status;
1498  unsigned int nbytes = 0, len;
1499  unsigned int fifo_depth = host->fifo_depth;
1500  unsigned int remain, fcnt;
1501 
1502  do {
1503  if (!sg_miter_next(sg_miter))
1504  goto done;
1505 
1506  host->sg = sg_miter->__sg;
1507  buf = sg_miter->addr;
1508  remain = sg_miter->length;
1509  offset = 0;
1510 
1511  do {
1512  fcnt = ((fifo_depth -
1513  SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1514  << shift) - host->part_buf_count;
1515  len = min(remain, fcnt);
1516  if (!len)
1517  break;
1518  host->push_data(host, (void *)(buf + offset), len);
1519  offset += len;
1520  nbytes += len;
1521  remain -= len;
1522  } while (remain);
1523 
1524  sg_miter->consumed = offset;
1525  status = mci_readl(host, MINTSTS);
1526  mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1527  } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1528  data->bytes_xfered += nbytes;
1529 
1530  if (!remain) {
1531  if (!sg_miter_next(sg_miter))
1532  goto done;
1533  sg_miter->consumed = 0;
1534  }
1535  sg_miter_stop(sg_miter);
1536  return;
1537 
1538 done:
1539  data->bytes_xfered += nbytes;
1540  sg_miter_stop(sg_miter);
1541  host->sg = NULL;
1542  smp_wmb();
1543  set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1544 }
1545 
1546 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1547 {
1548  if (!host->cmd_status)
1549  host->cmd_status = status;
1550 
1551  smp_wmb();
1552 
1553  set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1554  tasklet_schedule(&host->tasklet);
1555 }
1556 
1557 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1558 {
1559  struct dw_mci *host = dev_id;
1560  u32 pending;
1561  unsigned int pass_count = 0;
1562  int i;
1563 
1564  do {
1565  pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1566 
1567  /*
1568  * DTO fix - version 2.10a and below, and only if internal DMA
1569  * is configured.
1570  */
1571  if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1572  if (!pending &&
1573  ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1574  pending |= SDMMC_INT_DATA_OVER;
1575  }
1576 
1577  if (!pending)
1578  break;
1579 
1580  if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1581  mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
1582  host->cmd_status = pending;
1583  smp_wmb();
1584  set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1585  }
1586 
1587  if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1588  /* if there is an error report DATA_ERROR */
1589  mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1590  host->data_status = pending;
1591  smp_wmb();
1592  set_bit(EVENT_DATA_ERROR, &host->pending_events);
1593  tasklet_schedule(&host->tasklet);
1594  }
1595 
1596  if (pending & SDMMC_INT_DATA_OVER) {
1597  mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1598  if (!host->data_status)
1599  host->data_status = pending;
1600  smp_wmb();
1601  if (host->dir_status == DW_MCI_RECV_STATUS) {
1602  if (host->sg != NULL)
1603  dw_mci_read_data_pio(host);
1604  }
1605  set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1606  tasklet_schedule(&host->tasklet);
1607  }
1608 
1609  if (pending & SDMMC_INT_RXDR) {
1610  mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1611  if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
1612  dw_mci_read_data_pio(host);
1613  }
1614 
1615  if (pending & SDMMC_INT_TXDR) {
1616  mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1617  if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
1618  dw_mci_write_data_pio(host);
1619  }
1620 
1621  if (pending & SDMMC_INT_CMD_DONE) {
1622  mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1623  dw_mci_cmd_interrupt(host, pending);
1624  }
1625 
1626  if (pending & SDMMC_INT_CD) {
1627  mci_writel(host, RINTSTS, SDMMC_INT_CD);
1628  queue_work(host->card_workqueue, &host->card_work);
1629  }
1630 
1631  /* Handle SDIO Interrupts */
1632  for (i = 0; i < host->num_slots; i++) {
1633  struct dw_mci_slot *slot = host->slot[i];
1634  if (pending & SDMMC_INT_SDIO(i)) {
1635  mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1636  mmc_signal_sdio_irq(slot->mmc);
1637  }
1638  }
1639 
1640  } while (pass_count++ < 5);
1641 
1642 #ifdef CONFIG_MMC_DW_IDMAC
1643  /* Handle DMA interrupts */
1644  pending = mci_readl(host, IDSTS);
1645  if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1646  mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1647  mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1648  host->dma_ops->complete(host);
1649  }
1650 #endif
1651 
1652  return IRQ_HANDLED;
1653 }
1654 
1655 static void dw_mci_work_routine_card(struct work_struct *work)
1656 {
1657  struct dw_mci *host = container_of(work, struct dw_mci, card_work);
1658  int i;
1659 
1660  for (i = 0; i < host->num_slots; i++) {
1661  struct dw_mci_slot *slot = host->slot[i];
1662  struct mmc_host *mmc = slot->mmc;
1663  struct mmc_request *mrq;
1664  int present;
1665  u32 ctrl;
1666 
1667  present = dw_mci_get_cd(mmc);
1668  while (present != slot->last_detect_state) {
1669  dev_dbg(&slot->mmc->class_dev, "card %s\n",
1670  present ? "inserted" : "removed");
1671 
1672  /* Power up slot (before spin_lock, may sleep) */
1673  if (present != 0 && host->pdata->setpower)
1674  host->pdata->setpower(slot->id, mmc->ocr_avail);
1675 
1676  spin_lock_bh(&host->lock);
1677 
1678  /* Card change detected */
1679  slot->last_detect_state = present;
1680 
1681  /* Mark card as present if applicable */
1682  if (present != 0)
1683  set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1684 
1685  /* Clean up queue if present */
1686  mrq = slot->mrq;
1687  if (mrq) {
1688  if (mrq == host->mrq) {
1689  host->data = NULL;
1690  host->cmd = NULL;
1691 
1692  switch (host->state) {
1693  case STATE_IDLE:
1694  break;
1695  case STATE_SENDING_CMD:
1696  mrq->cmd->error = -ENOMEDIUM;
1697  if (!mrq->data)
1698  break;
1699  /* fall through */
1700  case STATE_SENDING_DATA:
1701  mrq->data->error = -ENOMEDIUM;
1702  dw_mci_stop_dma(host);
1703  break;
1704  case STATE_DATA_BUSY:
1705  case STATE_DATA_ERROR:
1706  if (mrq->data->error == -EINPROGRESS)
1707  mrq->data->error = -ENOMEDIUM;
1708  if (!mrq->stop)
1709  break;
1710  /* fall through */
1711  case STATE_SENDING_STOP:
1712  mrq->stop->error = -ENOMEDIUM;
1713  break;
1714  }
1715 
1716  dw_mci_request_end(host, mrq);
1717  } else {
1718  list_del(&slot->queue_node);
1719  mrq->cmd->error = -ENOMEDIUM;
1720  if (mrq->data)
1721  mrq->data->error = -ENOMEDIUM;
1722  if (mrq->stop)
1723  mrq->stop->error = -ENOMEDIUM;
1724 
1725  spin_unlock(&host->lock);
1726  mmc_request_done(slot->mmc, mrq);
1727  spin_lock(&host->lock);
1728  }
1729  }
1730 
1731  /* Power down slot */
1732  if (present == 0) {
1733  clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1734 
1735  /*
1736  * Clear down the FIFO - doing so generates a
1737  * block interrupt, hence setting the
1738  * scatter-gather pointer to NULL.
1739  */
1740  sg_miter_stop(&host->sg_miter);
1741  host->sg = NULL;
1742 
1743  ctrl = mci_readl(host, CTRL);
1744  ctrl |= SDMMC_CTRL_FIFO_RESET;
1745  mci_writel(host, CTRL, ctrl);
1746 
1747 #ifdef CONFIG_MMC_DW_IDMAC
1748  ctrl = mci_readl(host, BMOD);
1749  /* Software reset of DMA */
1750  ctrl |= SDMMC_IDMAC_SWRESET;
1751  mci_writel(host, BMOD, ctrl);
1752 #endif
1753 
1754  }
1755 
1756  spin_unlock_bh(&host->lock);
1757 
1758  /* Power down slot (after spin_unlock, may sleep) */
1759  if (present == 0 && host->pdata->setpower)
1760  host->pdata->setpower(slot->id, 0);
1761 
1762  present = dw_mci_get_cd(mmc);
1763  }
1764 
1765  mmc_detect_change(slot->mmc,
1766  msecs_to_jiffies(host->pdata->detect_delay_ms));
1767  }
1768 }
1769 
1770 #ifdef CONFIG_OF
1771 /* given a slot id, find out the device node representing that slot */
1772 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
1773 {
1774  struct device_node *np;
1775  const __be32 *addr;
1776  int len;
1777 
1778  if (!dev || !dev->of_node)
1779  return NULL;
1780 
1781  for_each_child_of_node(dev->of_node, np) {
1782  addr = of_get_property(np, "reg", &len);
1783  if (!addr || (len < sizeof(int)))
1784  continue;
1785  if (be32_to_cpup(addr) == slot)
1786  return np;
1787  }
1788  return NULL;
1789 }
1790 
1791 /* find out bus-width for a given slot */
1792 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
1793 {
1794  struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
1795  u32 bus_wd = 1;
1796 
1797  if (!np)
1798  return 1;
1799 
1800  if (of_property_read_u32(np, "bus-width", &bus_wd))
1801  dev_err(dev, "bus-width property not found, assuming width"
1802  " as 1\n");
1803  return bus_wd;
1804 }
1805 #else /* CONFIG_OF */
1806 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
1807 {
1808  return 1;
1809 }
1810 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
1811 {
1812  return NULL;
1813 }
1814 #endif /* CONFIG_OF */
1815 
1816 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1817 {
1818  struct mmc_host *mmc;
1819  struct dw_mci_slot *slot;
1820  struct dw_mci_drv_data *drv_data = host->drv_data;
1821  int ctrl_id, ret;
1822  u8 bus_width;
1823 
1824  mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
1825  if (!mmc)
1826  return -ENOMEM;
1827 
1828  slot = mmc_priv(mmc);
1829  slot->id = id;
1830  slot->mmc = mmc;
1831  slot->host = host;
1832  host->slot[id] = slot;
1833 
1834  mmc->ops = &dw_mci_ops;
1835  mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
1836  mmc->f_max = host->bus_hz;
1837 
1838  if (host->pdata->get_ocr)
1839  mmc->ocr_avail = host->pdata->get_ocr(id);
1840  else
1841  mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1842 
1843  /*
1844  * Start with slot power disabled, it will be enabled when a card
1845  * is detected.
1846  */
1847  if (host->pdata->setpower)
1848  host->pdata->setpower(id, 0);
1849 
1850  if (host->pdata->caps)
1851  mmc->caps = host->pdata->caps;
1852 
1853  if (host->dev->of_node) {
1854  ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
1855  if (ctrl_id < 0)
1856  ctrl_id = 0;
1857  } else {
1858  ctrl_id = to_platform_device(host->dev)->id;
1859  }
1860  if (drv_data && drv_data->caps)
1861  mmc->caps |= drv_data->caps[ctrl_id];
1862 
1863  if (host->pdata->caps2)
1864  mmc->caps2 = host->pdata->caps2;
1865 
1866  if (host->pdata->get_bus_wd)
1867  bus_width = host->pdata->get_bus_wd(slot->id);
1868  else if (host->dev->of_node)
1869  bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
1870  else
1871  bus_width = 1;
1872 
1873  if (drv_data && drv_data->setup_bus) {
1874  struct device_node *slot_np;
1875  slot_np = dw_mci_of_find_slot_node(host->dev, slot->id);
1876  ret = drv_data->setup_bus(host, slot_np, bus_width);
1877  if (ret)
1878  goto err_setup_bus;
1879  }
1880 
1881  switch (bus_width) {
1882  case 8:
1883  mmc->caps |= MMC_CAP_8_BIT_DATA;
1884  case 4:
1885  mmc->caps |= MMC_CAP_4_BIT_DATA;
1886  }
1887 
1888  if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
1889  mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
1890 
1891  if (host->pdata->blk_settings) {
1892  mmc->max_segs = host->pdata->blk_settings->max_segs;
1893  mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
1894  mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
1895  mmc->max_req_size = host->pdata->blk_settings->max_req_size;
1896  mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
1897  } else {
1898  /* Useful defaults if platform data is unset. */
1899 #ifdef CONFIG_MMC_DW_IDMAC
1900  mmc->max_segs = host->ring_size;
1901  mmc->max_blk_size = 65536;
1902  mmc->max_blk_count = host->ring_size;
1903  mmc->max_seg_size = 0x1000;
1904  mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
1905 #else
1906  mmc->max_segs = 64;
1907  mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
1908  mmc->max_blk_count = 512;
1909  mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1910  mmc->max_seg_size = mmc->max_req_size;
1911 #endif /* CONFIG_MMC_DW_IDMAC */
1912  }
1913 
1914  host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
1915  if (IS_ERR(host->vmmc)) {
1916  pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
1917  host->vmmc = NULL;
1918  } else
1919  regulator_enable(host->vmmc);
1920 
1921  if (dw_mci_get_cd(mmc))
1922  set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1923  else
1924  clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1925 
1926  mmc_add_host(mmc);
1927 
1928 #if defined(CONFIG_DEBUG_FS)
1929  dw_mci_init_debugfs(slot);
1930 #endif
1931 
1932  /* Card initially undetected */
1933  slot->last_detect_state = 0;
1934 
1935  /*
1936  * Card may have been plugged in prior to boot so we
1937  * need to run the detect tasklet
1938  */
1939  queue_work(host->card_workqueue, &host->card_work);
1940 
1941  return 0;
1942 
1943 err_setup_bus:
1944  mmc_free_host(mmc);
1945  return -EINVAL;
1946 }
1947 
1948 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
1949 {
1950  /* Shutdown detect IRQ */
1951  if (slot->host->pdata->exit)
1952  slot->host->pdata->exit(id);
1953 
1954  /* Debugfs stuff is cleaned up by mmc core */
1955  mmc_remove_host(slot->mmc);
1956  slot->host->slot[id] = NULL;
1957  mmc_free_host(slot->mmc);
1958 }
1959 
1960 static void dw_mci_init_dma(struct dw_mci *host)
1961 {
1962  /* Alloc memory for sg translation */
1963  host->sg_cpu = dma_alloc_coherent(host->dev, PAGE_SIZE,
1964  &host->sg_dma, GFP_KERNEL);
1965  if (!host->sg_cpu) {
1966  dev_err(host->dev, "%s: could not alloc DMA memory\n",
1967  __func__);
1968  goto no_dma;
1969  }
1970 
1971  /* Determine which DMA interface to use */
1972 #ifdef CONFIG_MMC_DW_IDMAC
1973  host->dma_ops = &dw_mci_idmac_ops;
1974  dev_info(host->dev, "Using internal DMA controller.\n");
1975 #endif
1976 
1977  if (!host->dma_ops)
1978  goto no_dma;
1979 
1980  if (host->dma_ops->init && host->dma_ops->start &&
1981  host->dma_ops->stop && host->dma_ops->cleanup) {
1982  if (host->dma_ops->init(host)) {
1983  dev_err(host->dev, "%s: Unable to initialize "
1984  "DMA Controller.\n", __func__);
1985  goto no_dma;
1986  }
1987  } else {
1988  dev_err(host->dev, "DMA initialization not found.\n");
1989  goto no_dma;
1990  }
1991 
1992  host->use_dma = 1;
1993  return;
1994 
1995 no_dma:
1996  dev_info(host->dev, "Using PIO mode.\n");
1997  host->use_dma = 0;
1998  return;
1999 }
2000 
2001 static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
2002 {
2003  unsigned long timeout = jiffies + msecs_to_jiffies(500);
2004  unsigned int ctrl;
2005 
2006  mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2007  SDMMC_CTRL_DMA_RESET));
2008 
2009  /* wait till resets clear */
2010  do {
2011  ctrl = mci_readl(host, CTRL);
2012  if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2013  SDMMC_CTRL_DMA_RESET)))
2014  return true;
2015  } while (time_before(jiffies, timeout));
2016 
2017  dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
2018 
2019  return false;
2020 }
2021 
2022 #ifdef CONFIG_OF
2023 static struct dw_mci_of_quirks {
2024  char *quirk;
2025  int id;
2026 } of_quirks[] = {
2027  {
2028  .quirk = "supports-highspeed",
2029  .id = DW_MCI_QUIRK_HIGHSPEED,
2030  }, {
2031  .quirk = "broken-cd",
2033  },
2034 };
2035 
2036 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2037 {
2038  struct dw_mci_board *pdata;
2039  struct device *dev = host->dev;
2040  struct device_node *np = dev->of_node;
2041  struct dw_mci_drv_data *drv_data = host->drv_data;
2042  int idx, ret;
2043 
2044  pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2045  if (!pdata) {
2046  dev_err(dev, "could not allocate memory for pdata\n");
2047  return ERR_PTR(-ENOMEM);
2048  }
2049 
2050  /* find out number of slots supported */
2051  if (of_property_read_u32(dev->of_node, "num-slots",
2052  &pdata->num_slots)) {
2053  dev_info(dev, "num-slots property not found, "
2054  "assuming 1 slot is available\n");
2055  pdata->num_slots = 1;
2056  }
2057 
2058  /* get quirks */
2059  for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2060  if (of_get_property(np, of_quirks[idx].quirk, NULL))
2061  pdata->quirks |= of_quirks[idx].id;
2062 
2063  if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2064  dev_info(dev, "fifo-depth property not found, using "
2065  "value of FIFOTH register as default\n");
2066 
2067  of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2068 
2069  if (drv_data && drv_data->parse_dt) {
2070  ret = drv_data->parse_dt(host);
2071  if (ret)
2072  return ERR_PTR(ret);
2073  }
2074 
2075  return pdata;
2076 }
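
/*
 * A hypothetical device-tree fragment that this parser would accept
 * (node names and the register address are illustrative only):
 *
 *	mshc0: mshc@12200000 {
 *		num-slots = <1>;
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		supports-highspeed;
 *		broken-cd;
 *
 *		slot@0 {
 *			reg = <0>;
 *			bus-width = <4>;
 *		};
 *	};
 *
 * "reg" and "bus-width" in the child node are read by
 * dw_mci_of_find_slot_node() and dw_mci_of_get_bus_wd() above.
 */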
2077 
2078 #else /* CONFIG_OF */
2079 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2080 {
2081  return ERR_PTR(-EINVAL);
2082 }
2083 #endif /* CONFIG_OF */
2084 
2085 int dw_mci_probe(struct dw_mci *host)
2086 {
2087  struct dw_mci_drv_data *drv_data = host->drv_data;
2088  int width, i, ret = 0;
2089  u32 fifo_size;
2090  int init_slots = 0;
2091 
2092  if (!host->pdata) {
2093  host->pdata = dw_mci_parse_dt(host);
2094  if (IS_ERR(host->pdata)) {
2095  dev_err(host->dev, "platform data not available\n");
2096  return -EINVAL;
2097  }
2098  }
2099 
2100  if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
2101  dev_err(host->dev,
2102  "Platform data must supply select_slot function\n");
2103  return -ENODEV;
2104  }
2105 
2106  host->biu_clk = clk_get(host->dev, "biu");
2107  if (IS_ERR(host->biu_clk)) {
2108  dev_dbg(host->dev, "biu clock not available\n");
2109  } else {
2110  ret = clk_prepare_enable(host->biu_clk);
2111  if (ret) {
2112  dev_err(host->dev, "failed to enable biu clock\n");
2113  clk_put(host->biu_clk);
2114  return ret;
2115  }
2116  }
2117 
2118  host->ciu_clk = clk_get(host->dev, "ciu");
2119  if (IS_ERR(host->ciu_clk)) {
2120  dev_dbg(host->dev, "ciu clock not available\n");
2121  } else {
2122  ret = clk_prepare_enable(host->ciu_clk);
2123  if (ret) {
2124  dev_err(host->dev, "failed to enable ciu clock\n");
2125  clk_put(host->ciu_clk);
2126  goto err_clk_biu;
2127  }
2128  }
2129 
2130  if (IS_ERR(host->ciu_clk))
2131  host->bus_hz = host->pdata->bus_hz;
2132  else
2133  host->bus_hz = clk_get_rate(host->ciu_clk);
2134 
2135  if (drv_data && drv_data->setup_clock) {
2136  ret = drv_data->setup_clock(host);
2137  if (ret) {
2138  dev_err(host->dev,
2139  "implementation specific clock setup failed\n");
2140  goto err_clk_ciu;
2141  }
2142  }
2143 
2144  if (!host->bus_hz) {
2145  dev_err(host->dev,
2146  "Platform data must supply bus speed\n");
2147  ret = -ENODEV;
2148  goto err_clk_ciu;
2149  }
2150 
2151  host->quirks = host->pdata->quirks;
2152 
2153  spin_lock_init(&host->lock);
2154  INIT_LIST_HEAD(&host->queue);
2155 
2156  /*
2157  * Get the host data width - this assumes that HCON has been set with
2158  * the correct values.
2159  */
2160  i = (mci_readl(host, HCON) >> 7) & 0x7;
2161  if (!i) {
2162  host->push_data = dw_mci_push_data16;
2163  host->pull_data = dw_mci_pull_data16;
2164  width = 16;
2165  host->data_shift = 1;
2166  } else if (i == 2) {
2167  host->push_data = dw_mci_push_data64;
2168  host->pull_data = dw_mci_pull_data64;
2169  width = 64;
2170  host->data_shift = 3;
2171  } else {
2172  /* Check for a reserved value, and warn if it is */
2173  WARN((i != 1),
2174  "HCON reports a reserved host data width!\n"
2175  "Defaulting to 32-bit access.\n");
2176  host->push_data = dw_mci_push_data32;
2177  host->pull_data = dw_mci_pull_data32;
2178  width = 32;
2179  host->data_shift = 2;
2180  }
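/*
 * To summarize the decode above (assuming the usual HCON layout, with
 * H_DATA_WIDTH in bits [9:7]): 0 selects 16-bit FIFO accesses, 2
 * selects 64-bit, and anything else falls back to 32-bit, with a
 * warning unless it is exactly 1, the architected 32-bit encoding.
 * data_shift is log2 of the access size in bytes, so a 32-bit host
 * moves 1 << 2 = 4 bytes per FIFO access.
 */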
2181 
2182  /* Reset all blocks */
2183  if (!mci_wait_reset(host->dev, host))
2184  return -ENODEV;
2185 
2186  host->dma_ops = host->pdata->dma_ops;
2187  dw_mci_init_dma(host);
2188 
2189  /* Clear the interrupts for the host controller */
2190  mci_writel(host, RINTSTS, 0xFFFFFFFF);
2191  mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2192 
2193  /* Put in max timeout */
2194  mci_writel(host, TMOUT, 0xFFFFFFFF);
2195 
2196  /*
2197  * FIFO threshold settings: RX_WMark = fifo_size / 2 - 1,
2198  * TX_WMark = fifo_size / 2, DMA multiple transaction size = 8.
2199  */
2200  if (!host->pdata->fifo_depth) {
2201  /*
2202  * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2203  * have been overwritten by the bootloader, just like we're
2204  * about to do, so if you know the value for your hardware, you
2205  * should put it in the platform data.
2206  */
2207  fifo_size = mci_readl(host, FIFOTH);
2208  fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2209  } else {
2210  fifo_size = host->pdata->fifo_depth;
2211  }
2212  host->fifo_depth = fifo_size;
2213  host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
2214  ((fifo_size/2) << 0));
2215  mci_writel(host, FIFOTH, host->fifoth_val);
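/*
 * Worked example with an illustrative depth: for fifo_size = 32 the
 * expression above programs MSIZE = 0x2 (a DMA multiple-transaction
 * size of 8), RX_WMark = 32/2 - 1 = 15 and TX_WMark = 32/2 = 16, i.e.
 *
 *	fifoth_val = (0x2 << 28) | (15 << 16) | 16 = 0x200f0010
 */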
2216 
2217  /* disable clock to CIU */
2218  mci_writel(host, CLKENA, 0);
2219  mci_writel(host, CLKSRC, 0);
2220 
2221  tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2222  host->card_workqueue = alloc_workqueue("dw-mci-card",
2223  WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
2224  if (!host->card_workqueue)
2225  { ret = -ENOMEM; goto err_dmaunmap; } /* don't return 0 on alloc failure */
2226  INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2227  ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags, "dw-mci", host);
2228  if (ret)
2229  goto err_workqueue;
2230 
2231  if (host->pdata->num_slots)
2232  host->num_slots = host->pdata->num_slots;
2233  else
2234  host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
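/*
 * When num-slots is absent, HCON supplies the count: bits [5:1] hold
 * NUM_CARD minus one (again assuming the usual HCON layout), so a
 * controller synthesized for a single card reads back 0 here and
 * yields num_slots = 1.
 */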
2235 
2236  /*
2237  * Enable interrupts for command done, data over, data empty, card detect,
2238  * receive ready, and errors such as transmit/receive timeout and CRC error.
2239  */
2240  mci_writel(host, RINTSTS, 0xFFFFFFFF);
2241  mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2242  SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2243  DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2244  mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2245 
2246  dev_info(host->dev, "DW MMC controller at irq %d, "
2247  "%d bit host data width, "
2248  "%u deep fifo\n",
2249  host->irq, width, fifo_size);
2250 
2251  /* We need at least one slot to succeed */
2252  for (i = 0; i < host->num_slots; i++) {
2253  ret = dw_mci_init_slot(host, i);
2254  if (ret)
2255  dev_dbg(host->dev, "slot %d init failed\n", i);
2256  else
2257  init_slots++;
2258  }
2259 
2260  if (init_slots) {
2261  dev_info(host->dev, "%d slots initialized\n", init_slots);
2262  } else {
2263  dev_dbg(host->dev, "attempted to initialize %d slots, "
2264  "but failed on all\n", host->num_slots);
2265  goto err_init_slot;
2266  }
2267 
2268  /*
2269  * In the 2.40a spec the offset of the DATA register changed.
2270  * Check the version ID and set the data offset accordingly.
2271  */
2272  host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2273  dev_info(host->dev, "Version ID is %04x\n", host->verid);
2274 
2275  if (host->verid < DW_MMC_240A)
2276  host->data_offset = DATA_OFFSET;
2277  else
2278  host->data_offset = DATA_240A_OFFSET;
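/*
 * For example, a controller whose VERID reads back 0x240a or newer
 * uses the relocated DATA_240A_OFFSET, while older IP keeps the
 * original DATA_OFFSET; both constants are defined in dw_mmc.h.
 */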
2279 
2280  if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2281  dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2282 
2283  return 0;
2284 
2285 err_init_slot:
2286  free_irq(host->irq, host);
2287 
2288 err_workqueue:
2289  destroy_workqueue(host->card_workqueue);
2290 
2291 err_dmaunmap:
2292  if (host->use_dma && host->dma_ops->exit)
2293  host->dma_ops->exit(host);
2294  dma_free_coherent(host->dev, PAGE_SIZE,
2295  host->sg_cpu, host->sg_dma);
2296 
2297  if (host->vmmc) {
2298  regulator_disable(host->vmmc);
2299  regulator_put(host->vmmc);
2300  }
2301 
2302 err_clk_ciu:
2303  if (!IS_ERR(host->ciu_clk)) {
2304  clk_disable_unprepare(host->ciu_clk);
2305  clk_put(host->ciu_clk);
2306  }
2307 err_clk_biu:
2308  if (!IS_ERR(host->biu_clk)) {
2309  clk_disable_unprepare(host->biu_clk);
2310  clk_put(host->biu_clk);
2311  }
2312  return ret;
2313 }
2314 EXPORT_SYMBOL(dw_mci_probe);
2315 
2316 void dw_mci_remove(struct dw_mci *host)
2317 {
2318  int i;
2319 
2320  mci_writel(host, RINTSTS, 0xFFFFFFFF);
2321  mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2322 
2323  for (i = 0; i < host->num_slots; i++) {
2324  dev_dbg(host->dev, "remove slot %d\n", i);
2325  if (host->slot[i])
2326  dw_mci_cleanup_slot(host->slot[i], i);
2327  }
2328 
2329  /* disable clock to CIU */
2330  mci_writel(host, CLKENA, 0);
2331  mci_writel(host, CLKSRC, 0);
2332 
2333  free_irq(host->irq, host);
2334  destroy_workqueue(host->card_workqueue);
2335  dma_free_coherent(host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
2336 
2337  if (host->use_dma && host->dma_ops->exit)
2338  host->dma_ops->exit(host);
2339 
2340  if (host->vmmc) {
2341  regulator_disable(host->vmmc);
2342  regulator_put(host->vmmc);
2343  }
2344 
2345  if (!IS_ERR(host->ciu_clk))
2346  clk_disable_unprepare(host->ciu_clk);
2347  if (!IS_ERR(host->biu_clk))
2348  clk_disable_unprepare(host->biu_clk);
2349  clk_put(host->ciu_clk);
2350  clk_put(host->biu_clk);
2351 }
2352 EXPORT_SYMBOL(dw_mci_remove);
2353 
2354 
2355 
2356 #ifdef CONFIG_PM_SLEEP
2357 /*
2358  * TODO: we should probably disable the clock to the card in the suspend path.
2359  */
2360 int dw_mci_suspend(struct dw_mci *host)
2361 {
2362  int i, ret = 0;
2363 
2364  for (i = 0; i < host->num_slots; i++) {
2365  struct dw_mci_slot *slot = host->slot[i];
2366  if (!slot)
2367  continue;
2368  ret = mmc_suspend_host(slot->mmc);
2369  if (ret < 0) {
2370  while (--i >= 0) {
2371  slot = host->slot[i];
2372  if (slot)
2373  mmc_resume_host(slot->mmc);
2374  }
2375  return ret;
2376  }
2377  }
2378 
2379  if (host->vmmc)
2380  regulator_disable(host->vmmc);
2381 
2382  return 0;
2383 }
2384 EXPORT_SYMBOL(dw_mci_suspend);
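/*
 * Note the unwind above: if any slot refuses to suspend, every slot
 * that was already suspended is resumed again before the error is
 * propagated, leaving the host in a consistent, fully-running state.
 */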
2385 
2386 int dw_mci_resume(struct dw_mci *host)
2387 {
2388  int i, ret;
2389 
2390  if (host->vmmc)
2391  regulator_enable(host->vmmc);
2392 
2393  if (!mci_wait_reset(host->dev, host)) {
2394  ret = -ENODEV;
2395  return ret;
2396  }
2397 
2398  if (host->use_dma && host->dma_ops->init)
2399  host->dma_ops->init(host);
2400 
2401  /* Restore the old value at FIFOTH register */
2402  mci_writel(host, FIFOTH, host->fifoth_val);
2403 
2404  mci_writel(host, RINTSTS, 0xFFFFFFFF);
2405  mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2406  SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2407  DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2408  mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2409 
2410  for (i = 0; i < host->num_slots; i++) {
2411  struct dw_mci_slot *slot = host->slot[i];
2412  if (!slot)
2413  continue;
2414  ret = mmc_resume_host(slot->mmc);
2415  if (ret < 0)
2416  return ret;
2417  }
2418  return 0;
2419 }
2420 EXPORT_SYMBOL(dw_mci_resume);
2421 #endif /* CONFIG_PM_SLEEP */
2422 
2423 static int __init dw_mci_init(void)
2424 {
2425  printk(KERN_INFO "Synopsys DesignWare Multimedia Card Interface Driver\n");
2426  return 0;
2427 }
2428 
2429 static void __exit dw_mci_exit(void)
2430 {
2431 }
2432 
2433 module_init(dw_mci_init);
2434 module_exit(dw_mci_exit);
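/*
 * This module registers no devices by itself: dw_mci_probe() and
 * dw_mci_remove() are exported for the platform glue (such as
 * dw_mmc-pltfm) to call, so the init/exit hooks above only announce
 * the driver.
 */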
2435 
2436 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2437 MODULE_AUTHOR("NXP Semiconductor VietNam");
2438 MODULE_AUTHOR("Imagination Technologies Ltd");
2439 MODULE_LICENSE("GPL v2");